diff --git a/.github/PULL_REQUEST_TEMPLATE/rfc.md b/.github/PULL_REQUEST_TEMPLATE/rfc.md deleted file mode 100644 index 3502f132623..00000000000 --- a/.github/PULL_REQUEST_TEMPLATE/rfc.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -name: RFC Template -about: A design RFC for Zebra -title: '' -labels: C-design -assignees: '' ---- - - - -## RFC Summary - -**Please copy the RFC summary here.** - -### Context - -**Please copy the RFC header here.** - -Feature Name: `my_feature` - -Start Date: YYYY-MM-DD - -Design PR: [ZcashFoundation/zebra#0000](https://github.com/ZcashFoundation/zebra/pull/0000) - -Zebra Issue: [ZcashFoundation/zebra#0000](https://github.com/ZcashFoundation/zebra/issues/0000) - -### Rendered - - -[Rendered](https://github.com/ZcashFoundation/zebra/blob/my-branch-name/book/src/dev/rfcs/drafts/xxxx-my-feature.md). - -## Zebra Team Approval - -Most of the Zebra team should review design RFCs: - -- [ ] @conradoplg -- [ ] @dconnolly -- [ ] @oxarbitrage -- [ ] @jvff -- [ ] @mpguerra -- [ ] @teor2345 diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 799068af4f7..9bed872f1ac 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -82,9 +82,6 @@ on: - '.github/workflows/sub-build-docker-image.yml' env: - # We need to combine the features manually because some tests don't use the Docker entrypoint - TEST_FEATURES: ${{ format('{0} {1}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES) }} - EXPERIMENTAL_FEATURES: ${{ format('{0} {1} {2}', vars.RUST_PROD_FEATURES, vars.RUST_TEST_FEATURES, vars.RUST_EXPERIMENTAL_FEATURES) }} RUST_LOG: ${{ vars.RUST_LOG }} RUST_BACKTRACE: ${{ vars.RUST_BACKTRACE }} RUST_LIB_BACKTRACE: ${{ vars.RUST_LIB_BACKTRACE }} @@ -135,27 +132,21 @@ jobs: # # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. 
# - # TODO: move this test command into entrypoint.sh - name: Run zebrad tests env: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.TEST_FEATURES }}" --workspace -- --include-ignored + docker run --tty -e NETWORK -e RUN_ALL_TESTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Run unit, basic acceptance tests, and ignored tests with experimental features. # - # TODO: move this test command into entrypoint.sh - name: Run zebrad tests with experimental features env: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} run: | - # GitHub doesn't allow empty variables - if [[ -n "${{ vars.RUST_EXPERIMENTAL_FEATURES }}" && "${{ vars.RUST_EXPERIMENTAL_FEATURES }}" != " " ]]; then - docker run -e NETWORK --name zebrad-tests-experimental --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.EXPERIMENTAL_FEATURES }} " --workspace -- --include-ignored - else - echo "Experimental builds are disabled, set RUST_EXPERIMENTAL_FEATURES in GitHub actions variables to enable them" - fi + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run --tty -e NETWORK -e RUN_ALL_EXPERIMENTAL_TESTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Run state tests with fake activation heights. 
# @@ -179,15 +170,12 @@ jobs: with: short-length: 7 - # TODO: move this test command into entrypoint.sh - # make sure that at least one test runs, and that it doesn't skip itself due to the environmental variable - name: Run tests with fake activation heights - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "zebra-test" --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights env: - TEST_FAKE_ACTIVATION_HEIGHTS: '1' NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run --tty -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. 
test-empty-sync: @@ -203,13 +191,12 @@ jobs: with: short-length: 7 - # TODO: move this test command into entrypoint.sh - name: Run zebrad large sync tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.TEST_FEATURES }}" --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ env: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run --tty -e NETWORK -e TEST_ZEBRA_EMPTY_SYNC=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Test launching lightwalletd with an empty lightwalletd and Zebra state. test-lightwalletd-integration: @@ -225,14 +212,12 @@ jobs: with: short-length: 7 - # TODO: move this test command into entrypoint.sh - name: Run tests with empty lightwalletd launch - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} - docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} cargo test --locked --release --features "${{ env.TEST_FEATURES }}" --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration env: - ZEBRA_TEST_LIGHTWALLETD: '1' NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} + docker run --tty -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD=1 ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}@${{ needs.build.outputs.image_digest }} # Test that Zebra works using the default config with the latest Zebra version. 
test-configuration-file: diff --git a/.gitignore b/.gitignore index ef29d45439f..b4af6c0b81e 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,8 @@ .zebra-state/ # Nix configs shell.nix +# Docker compose env files +*.env # ---- Below here this is an autogenerated .gitignore using Toptal ---- # Created by https://www.toptal.com/developers/gitignore/api/firebase,emacs,visualstudiocode,rust,windows,macos diff --git a/Cargo.lock b/Cargo.lock index d6ea681b917..8b740da67ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -721,9 +721,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.32" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41daef31d7a747c5c847246f36de49ced6f7403b4cdabc807a97b5cc184cda7a" +checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" dependencies = [ "android-tzdata", "iana-time-zone", @@ -1697,7 +1697,7 @@ dependencies = [ "futures-sink", "futures-util", "http", - "indexmap 2.1.0", + "indexmap 2.2.1", "slab", "tokio", "tokio-util 0.7.10", @@ -2073,9 +2073,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.1.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "433de089bd45971eecf4668ee0ee8f4cec17db4f8bd8f7bc3197a6ce37aa7d9b" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -2184,9 +2184,9 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ "either", ] @@ -2993,23 +2993,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - 
"indexmap 2.1.0", + "indexmap 2.2.1", ] [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", @@ -3934,9 +3934,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02" +checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" dependencies = [ "serde_derive", ] @@ -3952,9 +3952,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.195" +version = "1.0.196" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c" +checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", @@ -3963,11 +3963,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.111" +version = "1.0.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176e46fa42316f18edd598015a5166857fc835ec732f5215eac6b7bdbf0a84f4" +checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.1", "itoa", "ryu", "serde", @@ -4006,18 +4006,18 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.5.0" +version = "3.5.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58c3a1b3e418f61c25b2aeb43fc6c95eaa252b8cecdda67f401943e9e08d33f" +checksum = "f5c9fdb6b00a489875b22efd4b78fe2b363b72265cc5f6eb2e2b9ee270e6140c" dependencies = [ "base64 0.21.7", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.1.0", + "indexmap 2.2.1", "serde", "serde_json", - "serde_with_macros 3.5.0", + "serde_with_macros 3.5.1", "time", ] @@ -4035,9 +4035,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.5.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2068b437a31fc68f25dd7edc296b078f04b45145c199d8eed9866e45f1ff274" +checksum = "dbff351eb4b33600a2e138dfa0b10b65a238ea8ff8fb2387c422c5022a3e8298" dependencies = [ "darling 0.20.3", "proc-macro2", @@ -4577,7 +4577,7 @@ version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.1", "toml_datetime", "winnow", ] @@ -4588,7 +4588,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.2.1", "serde", "serde_spanned", "toml_datetime", @@ -5678,7 +5678,7 @@ dependencies = [ "hex", "humantime", "incrementalmerkletree", - "itertools 0.12.0", + "itertools 0.12.1", "jubjub", "lazy_static", "num-integer", @@ -5697,7 +5697,7 @@ dependencies = [ "serde", "serde-big-array", "serde_json", - "serde_with 3.5.0", + "serde_with 3.5.1", "sha2", "spandoc", "static_assertions", @@ -5708,6 +5708,7 @@ dependencies = [ "uint", "x25519-dalek", "zcash_address", + "zcash_client_backend", "zcash_encoding", "zcash_history", "zcash_note_encryption", @@ -5789,8 +5790,8 @@ dependencies = [ "hex", "howudoin", "humantime-serde", - "indexmap 2.1.0", - "itertools 0.12.0", + "indexmap 
2.2.1", + "itertools 0.12.1", "lazy_static", "metrics 0.22.0", "num-integer", @@ -5837,7 +5838,7 @@ dependencies = [ "futures", "hex", "hyper", - "indexmap 2.1.0", + "indexmap 2.2.1", "insta", "jsonrpc-core", "jsonrpc-derive", @@ -5870,9 +5871,9 @@ dependencies = [ "ff", "futures", "group", - "indexmap 2.1.0", + "indexmap 2.2.1", "insta", - "itertools 0.12.0", + "itertools 0.12.1", "jubjub", "proptest", "proptest-derive", @@ -5920,9 +5921,9 @@ dependencies = [ "hex-literal", "howudoin", "humantime-serde", - "indexmap 2.1.0", + "indexmap 2.2.1", "insta", - "itertools 0.12.0", + "itertools 0.12.1", "jubjub", "lazy_static", "metrics 0.22.0", @@ -5957,9 +5958,9 @@ dependencies = [ "futures", "hex", "humantime", - "indexmap 2.1.0", + "indexmap 2.2.1", "insta", - "itertools 0.12.0", + "itertools 0.12.1", "lazy_static", "once_cell", "owo-colors 4.0.0", @@ -5983,7 +5984,7 @@ version = "1.0.0-beta.34" dependencies = [ "color-eyre", "hex", - "itertools 0.12.0", + "itertools 0.12.1", "jsonrpc", "regex", "reqwest", @@ -6019,7 +6020,7 @@ dependencies = [ "howudoin", "humantime-serde", "hyper", - "indexmap 2.1.0", + "indexmap 2.2.1", "indicatif", "inferno", "insta", diff --git a/book/src/user/docker.md b/book/src/user/docker.md index 5546d1f4495..bf0f33d9704 100644 --- a/book/src/user/docker.md +++ b/book/src/user/docker.md @@ -4,14 +4,34 @@ The easiest way to run Zebra is using [Docker](https://docs.docker.com/get-docke We've embraced Docker in Zebra for most of the solution lifecycle, from development environments to CI (in our pipelines), and deployment to end users. +> [!TIP] +> We recommend using `docker compose` sub-command over the plain `docker` CLI, especially for more advanced use-cases like running CI locally, as it provides a more convenient and powerful way to manage multi-container Docker applications. 
See [CI/CD Local Testing](#cicd-local-testing) for more information, and other compose files available in the [docker](https://github.com/ZcashFoundation/zebra/tree/main/docker) folder. + ## Quick usage You can deploy Zebra for daily use with the images available in [Docker Hub](https://hub.docker.com/r/zfnd/zebra) or build it locally for testing. ### Ready to use image +Using `docker compose`: + ```shell -docker run --detach zfnd/zebra:latest +docker compose -f docker/docker-compose.yml up +``` + +With plain `docker` CLI: + +```shell +docker volume create zebrad-cache + +docker run -d --platform linux/amd64 \ + --restart unless-stopped \ + --env-file .env \ + --mount type=volume,source=zebrad-cache,target=/var/cache/zebrad-cache \ + -p 8233:8233 \ + --memory 16G \ + --cpus 4 \ + zfnd/zebra ``` ### Build it locally @@ -32,7 +52,7 @@ You're able to specify various parameters when building or launching the Docker For example, if we'd like to enable metrics on the image, we'd build it using the following `build-arg`: -> [!WARNING] +> [!IMPORTANT] > To fully use and display the metrics, you'll need to run a Prometheus and Grafana server, and configure it to scrape and visualize the metrics endpoint. This is explained in more detailed in the [Metrics](https://zebra.zfnd.org/user/metrics.html#zebra-metrics) section of the User Guide. ```shell @@ -63,11 +83,53 @@ cache_dir = "/var/cache/zebrad-cache" endpoint_addr = "127.0.0.1:9999" ``` -### Build time arguments +### Running Zebra with Lightwalletd + +To run Zebra with Lightwalletd, we recommend using the provided `docker compose` files for Zebra and Lightwalletd, which will start both services and connect them together, while exposing ports, mounting volumes, and setting environment variables. 
+ +```shell +docker compose -f docker/docker-compose.yml -f docker/docker-compose.lwd.yml up +``` + +### CI/CD Local Testing + +To run CI tests locally, which mimics the testing done in our CI pipelines on GitHub Actions, use the `docker-compose.test.yml` file. This setup allows for a consistent testing environment both locally and in CI. + +#### Running Tests Locally + +1. **Setting Environment Variables**: + - Modify the `test.env` file to set the desired test configurations. + - For running all tests, set `RUN_ALL_TESTS=1` in `test.env`. + +2. **Starting the Test Environment**: + - Use Docker Compose to start the testing environment: + + ```shell + docker-compose -f docker/docker-compose.test.yml up + ``` + + - This will start the Docker container and run the tests based on `test.env` settings. + +3. **Viewing Test Output**: + - The test results and logs will be displayed in the terminal. + +4. **Stopping the Environment**: + - Once testing is complete, stop the environment using: + + ```shell + docker-compose -f docker/docker-compose.test.yml down + ``` + +This approach ensures you can run the same tests locally that are run in CI, providing a robust way to validate changes before pushing to the repository. + +### Build and Run Time Configuration + +#### Build Time Arguments #### Configuration - `FEATURES`: Specifies the features to build `zebrad` with. Example: `"default-release-binaries getblocktemplate-rpcs"` +- `TEST_FEATURES`: Specifies the features for tests. Example: `"lightwalletd-grpc-tests zebra-checkpoints"` #### Logging @@ -86,9 +148,7 @@ endpoint_addr = "127.0.0.1:9999" - `SHORT_SHA`: Represents the short SHA of the commit. Example: `"a1b2c3d"` -### Run time variables - -#### Network +#### Run Time Variables - `NETWORK`: Specifies the network type. Example: `"Mainnet"` @@ -113,6 +173,8 @@ endpoint_addr = "127.0.0.1:9999" - `TRACING_ENDPOINT_ADDR`: Address for tracing endpoint. 
Example: `"0.0.0.0"` - `TRACING_ENDPOINT_PORT`: Port for tracing endpoint. Example: `3000` +Specific tests are defined in `docker/test.env` file and can be enabled by setting the corresponding environment variable to `1`. + ## Registries The images built by the Zebra team are all publicly hosted. Old image versions meant to be used by our [CI pipeline](https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/ci-integration-tests-gcp.yml) (`zebrad-test`, `lighwalletd`) might be deleted on a scheduled basis. diff --git a/book/src/user/lightwalletd.md b/book/src/user/lightwalletd.md index 230bb879e34..4688c89e519 100644 --- a/book/src/user/lightwalletd.md +++ b/book/src/user/lightwalletd.md @@ -6,17 +6,21 @@ recommend using use it in testing. Other `lightwalletd` forks have limited support, see the [Sync lightwalletd](#sync-lightwalletd) section for more info. +> [!NOTE] +> You can also use `docker` to run lightwalletd with zebra. Please see our [docker documentation](./docker.md#running-zebra-with-lightwalletd) for more information. 
+ Contents: -- [Configure zebra for lightwalletd](#configure-zebra-for-lightwalletd) - - [JSON-RPC](#json-rpc) -- [Sync Zebra](#sync-zebra) -- [Download and build lightwalletd](#download-and-build-lightwalletd) -- [Sync lightwalletd](#sync-lightwalletd) -- [Run tests](#run-tests) -- [Connect wallet to lightwalletd](#connect-wallet-to-lightwalletd) - - [Download and build the cli-wallet](#download-and-build-the-cli-wallet) - - [Run the wallet](#run-the-wallet) +- [Running lightwalletd with zebra](#running-lightwalletd-with-zebra) + - [Configure zebra for lightwalletd](#configure-zebra-for-lightwalletd) + - [JSON-RPC](#json-rpc) + - [Sync Zebra](#sync-zebra) + - [Download and build lightwalletd](#download-and-build-lightwalletd) + - [Sync lightwalletd](#sync-lightwalletd) + - [Run tests](#run-tests) + - [Connect a wallet to lightwalletd](#connect-a-wallet-to-lightwalletd) + - [Download and build the cli-wallet](#download-and-build-the-cli-wallet) + - [Run the wallet](#run-the-wallet) ## Configure zebra for lightwalletd diff --git a/docker/.env b/docker/.env new file mode 100644 index 00000000000..2d96240f23e --- /dev/null +++ b/docker/.env @@ -0,0 +1,33 @@ +RUST_LOG=info +# This variable forces the use of color in the logs +ZEBRA_FORCE_USE_COLOR=1 +LOG_COLOR=true + +### +# Configuration Variables +# These variables are used to configure the zebra node +# Check the entrypoint.sh script for more details +### + +# The config file full path used in the Dockerfile. 
+ZEBRA_CONF_PATH=/etc/zebrad/zebrad.toml +# [network] +NETWORK=Mainnet +ZEBRA_LISTEN_ADDR=0.0.0.0 +# [consensus] +ZEBRA_CHECKPOINT_SYNC=true +# [state] +# Set this to change the default cached state directory +ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache +LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache +# [metrics] +METRICS_ENDPOINT_ADDR=0.0.0.0 +METRICS_ENDPOINT_PORT=9999 +# [tracing] +TRACING_ENDPOINT_ADDR=0.0.0.0 +TRACING_ENDPOINT_PORT=3000 +# [rpc] +RPC_LISTEN_ADDR=0.0.0.0 +# if ${RPC_PORT} is not set, it will use the default value for the current network +RPC_PORT=8232 + diff --git a/docker/Dockerfile b/docker/Dockerfile index e520ce7d781..570642a7949 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -17,6 +17,7 @@ # https://github.com/ZcashFoundation/zebra/settings/variables/actions ARG FEATURES="default-release-binaries" ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" +ARG EXPERIMENTAL_FEATURES="" # This stage implements cargo-chef for docker layer caching FROM rust:bullseye as chef @@ -111,6 +112,8 @@ ENV ZEBRA_SKIP_IPV6_TESTS=${ZEBRA_SKIP_IPV6_TESTS:-1} # separately from the test and production image builds. ARG FEATURES ARG TEST_FEATURES +ARG EXPERIMENTAL_FEATURES +# TODO: add empty $EXPERIMENTAL_FEATURES when we can avoid adding an extra space to the end of the string ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}" # Re-hydrate the minimum project skeleton identified by `cargo chef prepare` in the planner stage, @@ -122,7 +125,6 @@ ARG ENTRYPOINT_FEATURES="${FEATURES} ${TEST_FEATURES}" # # TODO: add --locked when cargo-chef supports it RUN cargo chef cook --tests --release --features "${ENTRYPOINT_FEATURES}" --workspace --recipe-path recipe.json - # Undo the source file changes made by cargo-chef. # rsync invalidates the cargo cache for the changed files only, by updating their timestamps. # This makes sure the fake empty binaries created by cargo-chef are rebuilt. 
@@ -140,6 +142,9 @@ RUN chmod u+x /entrypoint.sh # Entrypoint environment variables ENV ENTRYPOINT_FEATURES=${ENTRYPOINT_FEATURES} +# We repeat the ARGs here, so they are available in the entrypoint.sh script for $RUN_ALL_EXPERIMENTAL_TESTS +ARG EXPERIMENTAL_FEATURES="shielded-scan journald prometheus filter-reload" +ENV ENTRYPOINT_FEATURES_EXPERIMENTAL="${ENTRYPOINT_FEATURES} ${EXPERIMENTAL_FEATURES}" # By default, runs the entrypoint tests specified by the environmental variables (if any are set) ENTRYPOINT [ "/entrypoint.sh" ] @@ -183,6 +188,7 @@ COPY --from=release /entrypoint.sh / RUN apt-get update && \ apt-get install -y --no-install-recommends \ ca-certificates \ + curl \ rocksdb-tools # Config settings for zebrad diff --git a/docker/docker-compose.lwd.yml b/docker/docker-compose.lwd.yml new file mode 100644 index 00000000000..7d8c56b1855 --- /dev/null +++ b/docker/docker-compose.lwd.yml @@ -0,0 +1,59 @@ +version: "3.8" + +services: + zebra: + ports: + - "8232:8232" # Opens an RPC endpoint (for lightwalletd and mining) + healthcheck: + start_period: 1m + interval: 15s + timeout: 10s + retries: 3 + test: ["CMD-SHELL", "curl --data-binary '{\"id\":\"curltest\", \"method\": \"getinfo\"}' -H 'content-type: application/json' 127.0.0.1:8232 || exit 1"] + + lightwalletd: + image: electriccoinco/lightwalletd + platform: linux/amd64 + depends_on: + zebra: + condition: service_started + restart: unless-stopped + deploy: + resources: + reservations: + cpus: "4" + memory: 16G + environment: + - LWD_GRPC_PORT=9067 + - LWD_HTTP_PORT=9068 + configs: + - source: lwd_config + target: /etc/lightwalletd/zcash.conf + uid: '2002' # Golang's container default user uid + gid: '2002' # Golang's container default group gid + mode: 0440 + volumes: + - litewalletd-data:/var/lib/lightwalletd/db + #! This setup with --no-tls-very-insecure is only for testing purposes + #! 
For production environments follow the guidelines here: https://github.com/zcash/lightwalletd#production-usage + command: > + --no-tls-very-insecure + --grpc-bind-addr=0.0.0.0:9067 + --http-bind-addr=0.0.0.0:9068 + --zcash-conf-path=/etc/lightwalletd/zcash.conf + --data-dir=/var/lib/lightwalletd/db + --log-file=/dev/stdout + --log-level=7 + ports: + - "9067:9067" # gRPC + - "9068:9068" # HTTP + +configs: + lwd_config: + # Change the following line to point to a zcash.conf on your host machine + # to allow for easy configuration changes without rebuilding the image + file: ./zcash-lightwalletd/zcash.conf + +volumes: + litewalletd-data: + driver: local diff --git a/docker/docker-compose.test.yml b/docker/docker-compose.test.yml new file mode 100644 index 00000000000..fac94e3f4db --- /dev/null +++ b/docker/docker-compose.test.yml @@ -0,0 +1,36 @@ +version: "3.8" + +services: + zebra: + build: + context: ../ + dockerfile: docker/Dockerfile + target: tests + restart: unless-stopped + deploy: + resources: + reservations: + cpus: "4" + memory: 16G + # Change this to the command you want to run, respecting the entrypoint.sh + # For example, to run the tests, use the following command: + # command: ["cargo", "test", "--locked", "--release", "--features", "${TEST_FEATURES}", "--package", "zebrad", "--test", "acceptance", "--", "--nocapture", "--include-ignored", "sync_large_checkpoints_"] + volumes: + - zebrad-cache:/var/cache/zebrad-cache + - lwd-cache:/var/cache/lwd-cache + ports: + # Zebra uses the following inbound and outbound TCP ports + - "8232:8232" # Opens an RPC endpoint (for wallet storing and mining) + - "8233:8233" # Mainnet Network (for peer connections) + - "18233:18233" # Testnet Network + # - "9999:9999" # Metrics + # - "3000:3000" # Tracing + env_file: + - test.env + +volumes: + zebrad-cache: + driver: local + + lwd-cache: + driver: local diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml new file mode 100644 index 00000000000..fd1b2ccc5b1 
--- /dev/null +++ b/docker/docker-compose.yml @@ -0,0 +1,45 @@ +version: "3.8" + +services: + zebra: + image: zfnd/zebra + platform: linux/amd64 + build: + context: ../ + dockerfile: docker/Dockerfile + target: runtime + restart: unless-stopped + deploy: + resources: + reservations: + cpus: "4" + memory: 16G + env_file: + - .env + #! Uncomment the `configs` mapping below to use the `zebrad.toml` config file from the host machine + #! NOTE: This will override the zebrad.toml in the image and make some variables irrelevant + # configs: + # - source: zebra_config + # target: /etc/zebrad/zebrad.toml + # uid: '2001' # Rust's container default user uid + # gid: '2001' # Rust's container default group gid + # mode: 0440 + volumes: + - zebrad-cache:/var/cache/zebrad-cache + ports: + # Zebra uses the following default inbound and outbound TCP ports + - "8233:8233" # Mainnet Network (for peer connections) + # - "8232:8232" # Opens an RPC endpoint (for wallet storing and mining) + # - "18233:18233" # Testnet Network + # - "9999:9999" # Metrics + # - "3000:3000" # Tracing + +configs: + zebra_config: + # Change the following line to point to a zebrad.toml on your host machine + # to allow for easy configuration changes without rebuilding the image + file: ../zebrad/tests/common/configs/v1.0.0-rc.2.toml/ + +volumes: + zebrad-cache: + driver: local diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 8db5acaab6c..ac7ffbff4a9 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -58,6 +58,9 @@ fi #### : "${RUN_ALL_TESTS:=}" +: "${RUN_ALL_EXPERIMENTAL_TESTS:=}" +: "${TEST_FAKE_ACTIVATION_HEIGHTS:=}" +: "${TEST_ZEBRA_EMPTY_SYNC:=}" : "${ZEBRA_TEST_LIGHTWALLETD:=}" : "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES:=}" : "${FULL_SYNC_TESTNET_TIMEOUT_MINUTES:=}" @@ -221,12 +224,27 @@ case "$1" in # For these tests, we activate the test features to avoid recompiling `zebrad`, # but we don't actually run any gRPC tests. 
if [[ "${RUN_ALL_TESTS}" -eq "1" ]]; then - # Run all the available tests for the current environment. - # If the lightwalletd environmental variables are set, we will also run those tests. - exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored + # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. + # If the lightwalletd environmental variables are set, we will also run those tests. + exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES}" --workspace -- --nocapture --include-ignored + + elif [[ "${RUN_ALL_EXPERIMENTAL_TESTS}" -eq "1" ]]; then + # Run unit, basic acceptance tests, and ignored tests with experimental features. + # If the lightwalletd environmental variables are set, we will also run those tests. + exec cargo test --locked --release --features "${ENTRYPOINT_FEATURES_EXPERIMENTAL}" --workspace -- --nocapture --include-ignored + + elif [[ "${TEST_FAKE_ACTIVATION_HEIGHTS}" -eq "1" ]]; then + # Run state tests with fake activation heights. + exec cargo test --locked --release --features "zebra-test" --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights + + elif [[ "${TEST_ZEBRA_EMPTY_SYNC}" -eq "1" ]]; then + # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. + run_cargo_test "${ENTRYPOINT_FEATURES}" "sync_large_checkpoints_" + + elif [[ "${ZEBRA_TEST_LIGHTWALLETD}" -eq "1" ]]; then + # Test launching lightwalletd with an empty lightwalletd and Zebra state. + run_cargo_test "${ENTRYPOINT_FEATURES}" "lightwalletd_integration" - # For these tests, we activate the gRPC feature to avoid recompiling `zebrad`, - # but we don't actually run any gRPC tests. elif [[ -n "${FULL_SYNC_MAINNET_TIMEOUT_MINUTES}" ]]; then # Run a Zebra full sync test on mainnet. 
run_cargo_test "${ENTRYPOINT_FEATURES}" "full_sync_mainnet" diff --git a/docker/test.env b/docker/test.env new file mode 100644 index 00000000000..fd2a7c876b7 --- /dev/null +++ b/docker/test.env @@ -0,0 +1,60 @@ +### +# Configuration Variables +# These variables are used to configure the zebra node +# Check the entrypoint.sh script for more details +### + +# Set this to change the default log level (must be set at build time) +RUST_LOG=info +# This variable forces the use of color in the logs +ZEBRA_FORCE_USE_COLOR=1 +LOG_COLOR=true +# Path to the config file. This variable has a default set in entrypoint.sh +# ZEBRA_CONF_PATH=/etc/zebrad/zebrad.toml +# [network] +NETWORK=Mainnet +# [state] +# Set this to change the default cached state directory +ZEBRA_CACHED_STATE_DIR=/var/cache/zebrad-cache +LIGHTWALLETD_DATA_DIR=/var/cache/lwd-cache +# [tracing] +LOG_COLOR=false +TRACING_ENDPOINT_ADDR=0.0.0.0 +TRACING_ENDPOINT_PORT=3000 + +#### +# Test Variables +# These variables are used to run tests in the Dockerfile +# Check the entrypoint.sh script for more details +#### + +# Unit tests +# TODO: These variables are evaluated to any value, even setting a NULL value will evaluate to true +# TEST_FAKE_ACTIVATION_HEIGHTS= +# ZEBRA_SKIP_NETWORK_TESTS +# ZEBRA_SKIP_IPV6_TESTS +RUN_ALL_TESTS= +RUN_ALL_EXPERIMENTAL_TESTS= +TEST_ZEBRA_EMPTY_SYNC= +ZEBRA_TEST_LIGHTWALLETD= +# Integration Tests +# Most of these tests require a cached state directory to save the network state +TEST_DISK_REBUILD= +# These tests needs a Zebra cached state +TEST_CHECKPOINT_SYNC= +GENERATE_CHECKPOINTS_MAINNET= +GENERATE_CHECKPOINTS_TESTNET= +TEST_UPDATE_SYNC= +# These tests need a Lightwalletd binary + a Zebra cached state +TEST_LWD_RPC_CALL= +TEST_GET_BLOCK_TEMPLATE= +TEST_SUBMIT_BLOCK= +# These tests need a Lightwalletd binary + Lightwalletd cached state + a Zebra cached state +TEST_LWD_UPDATE_SYNC= +TEST_LWD_GRPC= +TEST_LWD_TRANSACTIONS= +# Full sync tests +# These tests could take a long time to run, 
depending on the network +FULL_SYNC_MAINNET_TIMEOUT_MINUTES= +FULL_SYNC_TESTNET_TIMEOUT_MINUTES= +TEST_LWD_FULL_SYNC= diff --git a/docker/zcash-lightwalletd/zcash.conf b/docker/zcash-lightwalletd/zcash.conf new file mode 100644 index 00000000000..dc2a0238fc5 --- /dev/null +++ b/docker/zcash-lightwalletd/zcash.conf @@ -0,0 +1,5 @@ +rpcbind=zebra +rpcport=8232 +rpcuser=zcashrpc +rpcpassword=changeme +testnet=0 diff --git a/tower-batch-control/Cargo.toml b/tower-batch-control/Cargo.toml index de8b97c1f91..ccab08d6aaf 100644 --- a/tower-batch-control/Cargo.toml +++ b/tower-batch-control/Cargo.toml @@ -27,7 +27,7 @@ categories = ["algorithms", "asynchronous"] [dependencies] futures = "0.3.30" futures-core = "0.3.28" -pin-project = "1.1.3" +pin-project = "1.1.4" rayon = "1.8.1" tokio = { version = "1.35.1", features = ["time", "sync", "tracing", "macros"] } tokio-util = "0.7.10" diff --git a/tower-fallback/Cargo.toml b/tower-fallback/Cargo.toml index 810d1eedc24..77fce4e622d 100644 --- a/tower-fallback/Cargo.toml +++ b/tower-fallback/Cargo.toml @@ -16,7 +16,7 @@ keywords = ["tower", "batch"] categories = ["algorithms", "asynchronous"] [dependencies] -pin-project = "1.1.3" +pin-project = "1.1.4" tower = "0.4.13" futures-core = "0.3.28" tracing = "0.1.39" diff --git a/zebra-chain/Cargo.toml b/zebra-chain/Cargo.toml index 45b89956f7d..554235e4f33 100644 --- a/zebra-chain/Cargo.toml +++ b/zebra-chain/Cargo.toml @@ -34,6 +34,11 @@ getblocktemplate-rpcs = [ "zcash_address", ] +# Experimental shielded scanning support +shielded-scan = [ + "zcash_client_backend" +] + # Experimental internal miner support # TODO: Internal miner feature functionality was removed at https://github.com/ZcashFoundation/zebra/issues/8180 # See what was removed at https://github.com/ZcashFoundation/zebra/blob/v1.5.1/zebra-chain/Cargo.toml#L38-L43 @@ -96,7 +101,7 @@ zcash_note_encryption = "0.4.0" zcash_primitives = { version = "0.13.0-rc.1", features = ["transparent-inputs"] } # Time -chrono = { 
version = "0.4.32", default-features = false, features = ["clock", "std", "serde"] } +chrono = { version = "0.4.33", default-features = false, features = ["clock", "std", "serde"] } humantime = "2.1.0" # Error Handling & Formatting @@ -107,13 +112,13 @@ tracing = "0.1.39" # Serialization hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.193", features = ["serde_derive", "rc"] } -serde_with = "3.5.0" +serde = { version = "1.0.196", features = ["serde_derive", "rc"] } +serde_with = "3.5.1" serde-big-array = "0.5.1" # Processing futures = "0.3.30" -itertools = "0.12.0" +itertools = "0.12.1" rayon = "1.8.1" # ZF deps @@ -122,14 +127,17 @@ redjubjub = "0.7.0" reddsa = "0.5.1" # Production feature json-conversion -serde_json = { version = "1.0.111", optional = true } +serde_json = { version = "1.0.113", optional = true } # Production feature async-error and testing feature proptest-impl tokio = { version = "1.35.1", optional = true } -# Experimental feature getblocktemplate-rpcs +# Production feature getblocktemplate-rpcs zcash_address = { version = "0.3.1", optional = true } +# Experimental feature shielded-scan +zcash_client_backend = { version = "0.10.0-rc.1", optional = true } + # Optional testing dependencies proptest = { version = "1.4.0", optional = true } proptest-derive = { version = "0.4.0", optional = true } diff --git a/zebra-chain/src/primitives.rs b/zebra-chain/src/primitives.rs index d074463286e..f5b14f8cece 100644 --- a/zebra-chain/src/primitives.rs +++ b/zebra-chain/src/primitives.rs @@ -12,6 +12,9 @@ mod address; #[cfg(feature = "getblocktemplate-rpcs")] pub use address::Address; +#[cfg(feature = "shielded-scan")] +pub mod viewing_key; + pub mod byte_array; pub use ed25519_zebra as ed25519; diff --git a/zebra-chain/src/primitives/viewing_key.rs b/zebra-chain/src/primitives/viewing_key.rs new file mode 100644 index 00000000000..3a34d534dc6 --- /dev/null +++ b/zebra-chain/src/primitives/viewing_key.rs @@ -0,0 +1,43 @@ +//! 
Type definitions for viewing keys and their hashes. + +use crate::parameters::Network; + +mod orchard; +mod sapling; + +use orchard::OrchardViewingKey; +use sapling::SaplingViewingKey; + +#[cfg(test)] +mod tests; + +/// A Zcash Sapling or Orchard viewing key +#[derive(Debug, Clone)] +pub enum ViewingKey { + /// A viewing key for Sapling + Sapling(SaplingViewingKey), + + /// A viewing key for Orchard + Orchard(OrchardViewingKey), +} + +impl ViewingKey { + /// Accepts an encoded Sapling viewing key to decode + /// + /// Returns a [`ViewingKey`] if successful, or None otherwise + fn parse_sapling(sapling_key: &str, network: Network) -> Option { + SaplingViewingKey::parse(sapling_key, network).map(Self::Sapling) + } + + /// Accepts an encoded Orchard viewing key to decode + /// + /// Returns a [`ViewingKey`] if successful, or None otherwise + fn parse_orchard(sapling_key: &str, network: Network) -> Option { + OrchardViewingKey::parse(sapling_key, network).map(Self::Orchard) + } + + /// Parses an encoded viewing key and returns it as a [`ViewingKey`] type. + pub fn parse(key: &str, network: Network) -> Option { + Self::parse_sapling(key, network).or_else(|| Self::parse_orchard(key, network)) + } +} diff --git a/zebra-chain/src/primitives/viewing_key/orchard.rs b/zebra-chain/src/primitives/viewing_key/orchard.rs new file mode 100644 index 00000000000..ddb0664be2a --- /dev/null +++ b/zebra-chain/src/primitives/viewing_key/orchard.rs @@ -0,0 +1,17 @@ +//! 
Defines types and implements methods for parsing Orchard viewing keys and converting them to `zebra-chain` types + +use crate::parameters::Network; + +/// A Zcash Orchard viewing key +#[derive(Debug, Clone)] +pub enum OrchardViewingKey {} + +impl OrchardViewingKey { + /// Accepts an encoded Orchard viewing key to decode + /// + /// Returns a [`OrchardViewingKey`] if successful, or None otherwise + pub fn parse(_key: &str, _network: Network) -> Option { + // TODO: parse Orchard viewing keys + None + } +} diff --git a/zebra-chain/src/primitives/viewing_key/sapling.rs b/zebra-chain/src/primitives/viewing_key/sapling.rs new file mode 100644 index 00000000000..f56731d44c3 --- /dev/null +++ b/zebra-chain/src/primitives/viewing_key/sapling.rs @@ -0,0 +1,87 @@ +//! Defines types and implements methods for parsing Sapling viewing keys and converting them to `zebra-chain` types + +use zcash_client_backend::encoding::decode_extended_full_viewing_key; +use zcash_primitives::{ + constants::*, + sapling::keys::{FullViewingKey as SaplingFvk, SaplingIvk}, + zip32::DiversifiableFullViewingKey as SaplingDfvk, +}; + +use crate::parameters::Network; + +/// A Zcash Sapling viewing key +#[derive(Debug, Clone)] +pub enum SaplingViewingKey { + /// An incoming viewing key for Sapling + Ivk(Box), + + /// A full viewing key for Sapling + Fvk(Box), + + /// A diversifiable full viewing key for Sapling + Dfvk(Box), +} + +impl SaplingViewingKey { + /// Accepts an encoded Sapling extended full viewing key to decode + /// + /// Returns a [`SaplingViewingKey::Dfvk`] if successful, or None otherwise + fn parse_extended_full_viewing_key(sapling_key: &str, network: Network) -> Option { + decode_extended_full_viewing_key(network.sapling_efvk_hrp(), sapling_key) + // this should fail often, so a debug-level log is okay + .map_err(|err| debug!(?err, "could not decode Sapling extended full viewing key")) + .ok() + .map(|efvk| Box::new(efvk.to_diversifiable_full_viewing_key())) + .map(Self::Dfvk) + } + + 
/// Accepts an encoded Sapling diversifiable full viewing key to decode + /// + /// Returns a [`SaplingViewingKey::Dfvk`] if successful, or None otherwise + fn parse_diversifiable_full_viewing_key(_sapling_key: &str, _network: Network) -> Option { + // TODO: Parse Sapling diversifiable full viewing key + None + } + + /// Accepts an encoded Sapling full viewing key to decode + /// + /// Returns a [`SaplingViewingKey::Fvk`] if successful, or None otherwise + fn parse_full_viewing_key(_sapling_key: &str, _network: Network) -> Option { + // TODO: Parse Sapling full viewing key + None + } + + /// Accepts an encoded Sapling incoming viewing key to decode + /// + /// Returns a [`SaplingViewingKey::Ivk`] if successful, or None otherwise + fn parse_incoming_viewing_key(_sapling_key: &str, _network: Network) -> Option { + // TODO: Parse Sapling incoming viewing key + None + } + + /// Accepts an encoded Sapling viewing key to decode + /// + /// Returns a [`SaplingViewingKey`] if successful, or None otherwise + pub(super) fn parse(key: &str, network: Network) -> Option { + // TODO: Try types with prefixes first if some don't have prefixes? + Self::parse_extended_full_viewing_key(key, network) + .or_else(|| Self::parse_diversifiable_full_viewing_key(key, network)) + .or_else(|| Self::parse_full_viewing_key(key, network)) + .or_else(|| Self::parse_incoming_viewing_key(key, network)) + } +} + +impl Network { + /// Returns the human-readable prefix for an Zcash Sapling extended full viewing key + /// for this network. 
+ fn sapling_efvk_hrp(&self) -> &'static str { + if self.is_a_test_network() { + // Assume custom testnets have the same HRP + // + // TODO: add the regtest HRP here + testnet::HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY + } else { + mainnet::HRP_SAPLING_EXTENDED_FULL_VIEWING_KEY + } + } +} diff --git a/zebra-chain/src/primitives/viewing_key/tests.rs b/zebra-chain/src/primitives/viewing_key/tests.rs new file mode 100644 index 00000000000..0b86c143131 --- /dev/null +++ b/zebra-chain/src/primitives/viewing_key/tests.rs @@ -0,0 +1,15 @@ +//! Tests for zebra-chain viewing key hashes + +use super::*; + +/// The extended Sapling viewing key of [ZECpages](https://zecpages.com/boardinfo) +pub const ZECPAGES_SAPLING_VIEWING_KEY: &str = "zxviews1q0duytgcqqqqpqre26wkl45gvwwwd706xw608hucmvfalr759ejwf7qshjf5r9aa7323zulvz6plhttp5mltqcgs9t039cx2d09mgq05ts63n8u35hyv6h9nc9ctqqtue2u7cer2mqegunuulq2luhq3ywjcz35yyljewa4mgkgjzyfwh6fr6jd0dzd44ghk0nxdv2hnv4j5nxfwv24rwdmgllhe0p8568sgqt9ckt02v2kxf5ahtql6s0ltjpkckw8gtymxtxuu9gcr0swvz"; + +/// Tests that `ViewingKey::parse` successfully decodes the zecpages sapling extended full viewing key +#[test] +fn parses_sapling_efvk_correctly() { + let _init_guard = zebra_test::init(); + + ViewingKey::parse(ZECPAGES_SAPLING_VIEWING_KEY, Network::Mainnet) + .expect("should parse hard-coded viewing key successfully"); +} diff --git a/zebra-consensus/Cargo.toml b/zebra-consensus/Cargo.toml index de0c4ed19ea..a8eb98db3e1 100644 --- a/zebra-consensus/Cargo.toml +++ b/zebra-consensus/Cargo.toml @@ -43,11 +43,11 @@ jubjub = "0.10.0" rand = "0.8.5" rayon = "1.8.1" -chrono = { version = "0.4.32", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.33", default-features = false, features = ["clock", "std"] } displaydoc = "0.2.4" lazy_static = "1.4.0" once_cell = "1.18.0" -serde = { version = "1.0.193", features = ["serde_derive"] } +serde = { version = "1.0.196", features = ["serde_derive"] } futures = "0.3.30" futures-util = "0.3.28" 
diff --git a/zebra-network/Cargo.toml b/zebra-network/Cargo.toml index d8c730920c4..a2636e8c135 100644 --- a/zebra-network/Cargo.toml +++ b/zebra-network/Cargo.toml @@ -43,20 +43,20 @@ proptest-impl = ["proptest", "proptest-derive", "zebra-chain/proptest-impl"] bitflags = "2.4.2" byteorder = "1.5.0" bytes = "1.5.0" -chrono = { version = "0.4.32", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.33", default-features = false, features = ["clock", "std"] } dirs = "5.0.1" hex = "0.4.3" humantime-serde = "1.1.1" -indexmap = { version = "2.0.1", features = ["serde"] } -itertools = "0.12.0" +indexmap = { version = "2.2.1", features = ["serde"] } +itertools = "0.12.1" lazy_static = "1.4.0" num-integer = "0.1.45" ordered-map = "0.4.2" -pin-project = "1.1.3" +pin-project = "1.1.4" rand = "0.8.5" rayon = "1.8.1" regex = "1.10.3" -serde = { version = "1.0.193", features = ["serde_derive"] } +serde = { version = "1.0.196", features = ["serde_derive"] } tempfile = "3.9.0" thiserror = "1.0.56" diff --git a/zebra-node-services/Cargo.toml b/zebra-node-services/Cargo.toml index 49f5fd3085e..2a665ef982a 100644 --- a/zebra-node-services/Cargo.toml +++ b/zebra-node-services/Cargo.toml @@ -44,13 +44,13 @@ color-eyre = { version = "0.6.2", optional = true } jsonrpc-core = { version = "18.0.0", optional = true } # Security: avoid default dependency on openssl reqwest = { version = "0.11.23", default-features = false, features = ["rustls-tls"], optional = true } -serde = { version = "1.0.193", optional = true } -serde_json = { version = "1.0.111", optional = true } +serde = { version = "1.0.196", optional = true } +serde_json = { version = "1.0.113", optional = true } [dev-dependencies] color-eyre = "0.6.2" jsonrpc-core = "18.0.0" reqwest = { version = "0.11.23", default-features = false, features = ["rustls-tls"] } -serde = "1.0.193" -serde_json = "1.0.111" +serde = "1.0.196" +serde_json = "1.0.113" diff --git 
a/zebra-node-services/src/scan_service/request.rs b/zebra-node-services/src/scan_service/request.rs index 5f85e5ece72..6069baf25be 100644 --- a/zebra-node-services/src/scan_service/request.rs +++ b/zebra-node-services/src/scan_service/request.rs @@ -1,5 +1,10 @@ //! `zebra_scan::service::ScanService` request types. +use crate::BoxError; + +/// The maximum number of keys that may be included in a request to the scan service +const MAX_REQUEST_KEYS: usize = 1000; + #[derive(Debug)] /// Request types for `zebra_scan::service::ScanService` pub enum Request { @@ -12,15 +17,70 @@ pub enum Request { /// TODO: Accept `ViewingKeyWithHash`es and return Ok(()) if successful or an error RegisterKeys(Vec<()>), - /// TODO: Accept `KeyHash`es and return Ok(`Vec`) with hashes of deleted keys - DeleteKeys(Vec<()>), + /// Deletes viewing keys and their results from the database. + DeleteKeys(Vec), - /// TODO: Accept `KeyHash`es and return `Transaction`s - Results(Vec<()>), + /// Accept keys and return transaction data + Results(Vec), /// TODO: Accept `KeyHash`es and return a channel receiver SubscribeResults(Vec<()>), - /// TODO: Accept `KeyHash`es and return transaction ids - ClearResults(Vec<()>), + /// Clear the results for a set of viewing keys + ClearResults(Vec), +} + +impl Request { + /// Check that the request data is valid for the request variant + pub fn check(&self) -> Result<(), BoxError> { + self.check_num_keys()?; + + Ok(()) + } + + /// Checks that requests which include keys have a valid number of keys. 
+ fn check_num_keys(&self) -> Result<(), BoxError> { + match self { + Request::DeleteKeys(keys) | Request::ClearResults(keys) + if keys.is_empty() || keys.len() > MAX_REQUEST_KEYS => + { + Err(format!("request must include between 1 and {MAX_REQUEST_KEYS} keys").into()) + } + + _ => Ok(()), + } + } +} + +#[test] +fn test_check_num_keys() { + let fake_keys: Vec<_> = std::iter::repeat(String::new()) + .take(MAX_REQUEST_KEYS + 1) + .collect(); + + let bad_requests = [ + Request::DeleteKeys(vec![]), + Request::DeleteKeys(fake_keys.clone()), + Request::ClearResults(vec![]), + Request::ClearResults(fake_keys), + ]; + + let valid_requests = [ + Request::DeleteKeys(vec![String::new()]), + Request::ClearResults(vec![String::new()]), + ]; + + for request in bad_requests { + let error = request.check().expect_err("check should return an error"); + + assert_eq!( + format!("request must include between 1 and {MAX_REQUEST_KEYS} keys"), + error.to_string(), + "check_num_keys should return an error because there are too many keys" + ); + } + + for request in valid_requests { + request.check().expect("check should return Ok(())"); + } } diff --git a/zebra-node-services/src/scan_service/response.rs b/zebra-node-services/src/scan_service/response.rs index 084f6d9dc88..3a04de94218 100644 --- a/zebra-node-services/src/scan_service/response.rs +++ b/zebra-node-services/src/scan_service/response.rs @@ -1,8 +1,11 @@ //! `zebra_scan::service::ScanService` response types. -use std::sync::{mpsc, Arc}; +use std::{ + collections::BTreeMap, + sync::{mpsc, Arc}, +}; -use zebra_chain::{block::Height, transaction::Transaction}; +use zebra_chain::{block::Height, transaction::Hash}; #[derive(Debug)] /// Response types for `zebra_scan::service::ScanService` @@ -14,8 +17,16 @@ pub enum Response { }, /// Response to Results request - Results(Vec), + /// + /// We use the nested `BTreeMap` so we don't repeat any piece of response data. 
+ Results(BTreeMap>>), + + /// Response to DeleteKeys request + DeletedKeys, + + /// Response to ClearResults request + ClearedResults, /// Response to SubscribeResults request - SubscribeResults(mpsc::Receiver>), + SubscribeResults(mpsc::Receiver>), } diff --git a/zebra-rpc/Cargo.toml b/zebra-rpc/Cargo.toml index 7e7e7469731..74ba2213322 100644 --- a/zebra-rpc/Cargo.toml +++ b/zebra-rpc/Cargo.toml @@ -42,7 +42,7 @@ proptest-impl = [ ] [dependencies] -chrono = { version = "0.4.32", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.33", default-features = false, features = ["clock", "std"] } futures = "0.3.30" # lightwalletd sends JSON-RPC requests over HTTP 1.1 @@ -53,8 +53,8 @@ jsonrpc-derive = "18.0.0" jsonrpc-http-server = "18.0.0" # zebra-rpc needs the preserve_order feature in serde_json, which is a dependency of jsonrpc-core -serde_json = { version = "1.0.111", features = ["preserve_order"] } -indexmap = { version = "2.0.1", features = ["serde"] } +serde_json = { version = "1.0.113", features = ["preserve_order"] } +indexmap = { version = "2.2.1", features = ["serde"] } tokio = { version = "1.35.1", features = ["time", "rt-multi-thread", "macros", "tracing"] } tower = "0.4.13" @@ -62,7 +62,7 @@ tower = "0.4.13" tracing = "0.1.39" hex = { version = "0.4.3", features = ["serde"] } -serde = { version = "1.0.193", features = ["serde_derive"] } +serde = { version = "1.0.196", features = ["serde_derive"] } # Experimental feature getblocktemplate-rpcs rand = { version = "0.8.5", optional = true } diff --git a/zebra-rpc/src/queue.rs b/zebra-rpc/src/queue.rs index e8bfa420da2..97662ec7be3 100644 --- a/zebra-rpc/src/queue.rs +++ b/zebra-rpc/src/queue.rs @@ -95,7 +95,7 @@ impl Queue { /// Remove a transaction from the queue. pub fn remove(&mut self, unmined_id: UnminedTxId) { - self.transactions.remove(&unmined_id); + self.transactions.swap_remove(&unmined_id); } /// Remove the oldest transaction from the queue. 
diff --git a/zebra-scan/Cargo.toml b/zebra-scan/Cargo.toml index 8d677c54b57..fd9fb1a2f97 100644 --- a/zebra-scan/Cargo.toml +++ b/zebra-scan/Cargo.toml @@ -17,6 +17,7 @@ categories = ["cryptography::cryptocurrencies"] [[bin]] # Bin to run the Scanner gRPC server name = "scanner-grpc-server" path = "src/bin/rpc_server.rs" +required-features = ["proptest-impl"] [features] @@ -41,10 +42,10 @@ proptest-impl = [ [dependencies] color-eyre = "0.6.2" -indexmap = { version = "2.0.1", features = ["serde"] } -itertools = "0.12.0" +indexmap = { version = "2.2.1", features = ["serde"] } +itertools = "0.12.1" semver = "1.0.21" -serde = { version = "1.0.193", features = ["serde_derive"] } +serde = { version = "1.0.196", features = ["serde_derive"] } tokio = { version = "1.35.1", features = ["time"] } tower = "0.4.13" tracing = "0.1.39" @@ -66,6 +67,7 @@ chrono = { version = "0.4.32", default-features = false, features = [ zebra-node-services = { path = "../zebra-node-services", version = "1.0.0-beta.33" } zebra-grpc = { path = "../zebra-grpc", version = "0.1.0-alpha.1" } +chrono = { version = "0.4.33", default-features = false, features = ["clock", "std", "serde"] } # test feature proptest-impl proptest = { version = "1.4.0", optional = true } diff --git a/zebra-scan/src/bin/rpc_server.rs b/zebra-scan/src/bin/rpc_server.rs index a9aa9753a48..9ebe2b00246 100644 --- a/zebra-scan/src/bin/rpc_server.rs +++ b/zebra-scan/src/bin/rpc_server.rs @@ -2,15 +2,16 @@ use tower::ServiceBuilder; -use zebra_scan::service::ScanService; +use zebra_scan::{service::ScanService, storage::Storage}; #[tokio::main] /// Runs an RPC server with a mock ScanTask async fn main() -> Result<(), Box> { let (config, network) = Default::default(); - let scan_service = ServiceBuilder::new() - .buffer(10) - .service(ScanService::new_with_mock_scanner(&config, network)); + + let (scan_service, _cmd_receiver) = + ScanService::new_with_mock_scanner(Storage::new(&config, network, false)); + let scan_service = 
ServiceBuilder::new().buffer(10).service(scan_service); // Start the gRPC server. zebra_grpc::server::init(scan_service).await?; diff --git a/zebra-scan/src/init.rs b/zebra-scan/src/init.rs index f40eebac345..1ed32b528bb 100644 --- a/zebra-scan/src/init.rs +++ b/zebra-scan/src/init.rs @@ -23,7 +23,7 @@ pub enum ScanTaskCommand { done_tx: oneshot::Sender<()>, /// Key hashes that are to be removed - key_hashes: Vec<()>, + keys: Vec, }, /// Start sending results for key hashes to `result_sender` @@ -36,25 +36,29 @@ pub enum ScanTaskCommand { }, } -#[derive(Debug)] +#[derive(Debug, Clone)] /// Scan task handle and command channel sender pub struct ScanTask { /// [`JoinHandle`] of scan task - pub handle: JoinHandle>, + pub handle: Arc>>, /// Task command channel sender - cmd_sender: mpsc::Sender, + pub cmd_sender: mpsc::Sender, } impl ScanTask { /// Spawns a new [`ScanTask`] for tests. - pub fn mock() -> Self { - let (cmd_sender, _cmd_receiver) = mpsc::channel(); - - Self { - handle: tokio::spawn(std::future::pending()), - cmd_sender, - } + #[cfg(any(test, feature = "proptest-impl"))] + pub fn mock() -> (Self, mpsc::Receiver) { + let (cmd_sender, cmd_receiver) = mpsc::channel(); + + ( + Self { + handle: Arc::new(tokio::spawn(std::future::pending())), + cmd_sender, + }, + cmd_receiver, + ) } /// Spawns a new [`ScanTask`]. 
@@ -64,11 +68,16 @@ impl ScanTask { state: scan::State, chain_tip_change: ChainTipChange, ) -> Self { - // TODO: Pass `_cmd_receiver` to `scan::start()` to pass it new keys after it's been spawned - let (cmd_sender, _cmd_receiver) = mpsc::channel(); + let (cmd_sender, cmd_receiver) = mpsc::channel(); Self { - handle: scan::spawn_init(config, network, state, chain_tip_change), + handle: Arc::new(scan::spawn_init( + config, + network, + state, + chain_tip_change, + cmd_receiver, + )), cmd_sender, } } @@ -80,18 +89,21 @@ impl ScanTask { ) -> Result<(), mpsc::SendError> { self.cmd_sender.send(command) } -} -/// Initialize the scanner based on its config, and spawn a task for it. -/// -/// TODO: add a test for this function. -pub fn spawn_init( - config: &Config, - network: Network, - state: scan::State, - chain_tip_change: ChainTipChange, -) -> JoinHandle> { - scan::spawn_init(config, network, state, chain_tip_change) + /// Sends a message to the scan task to remove the provided viewing keys. + pub fn remove_keys( + &mut self, + keys: &[String], + ) -> Result, mpsc::SendError> { + let (done_tx, done_rx) = oneshot::channel(); + + self.send(ScanTaskCommand::RemoveKeys { + keys: keys.to_vec(), + done_tx, + })?; + + Ok(done_rx) + } } /// Initialize [`ScanService`] based on its config. 
diff --git a/zebra-scan/src/lib.rs b/zebra-scan/src/lib.rs index 9d26881d970..c4052309367 100644 --- a/zebra-scan/src/lib.rs +++ b/zebra-scan/src/lib.rs @@ -19,4 +19,4 @@ pub mod service; pub mod tests; pub use config::Config; -pub use init::{init, spawn_init}; +pub use init::{init, ScanTask}; diff --git a/zebra-scan/src/scan.rs b/zebra-scan/src/scan.rs index 0c6c6352ff4..522f895ddef 100644 --- a/zebra-scan/src/scan.rs +++ b/zebra-scan/src/scan.rs @@ -2,7 +2,10 @@ use std::{ collections::{BTreeMap, HashMap}, - sync::Arc, + sync::{ + mpsc::{Receiver, TryRecvError}, + Arc, + }, time::Duration, }; @@ -37,8 +40,9 @@ use zebra_chain::{ use zebra_state::{ChainTipChange, SaplingScannedResult, TransactionIndex}; use crate::{ + init::ScanTaskCommand, storage::{SaplingScanningKey, Storage}, - Config, + Config, ScanTask, }; /// The generic state type used by the scanner. @@ -66,6 +70,7 @@ pub async fn start( state: State, chain_tip_change: ChainTipChange, storage: Storage, + cmd_receiver: Receiver, ) -> Result<(), Report> { let network = storage.network(); let sapling_activation_height = storage.min_sapling_birthday_height(); @@ -102,12 +107,14 @@ pub async fn start( Ok::<_, Report>((key.clone(), parsed_keys)) }) .try_collect()?; - let parsed_keys = Arc::new(parsed_keys); + let mut parsed_keys = Arc::new(parsed_keys); // Give empty states time to verify some blocks before we start scanning. tokio::time::sleep(INITIAL_WAIT).await; loop { + parsed_keys = ScanTask::process_msgs(&cmd_receiver, parsed_keys)?; + let scanned_height = scan_height_and_store_results( height, state.clone(), @@ -130,6 +137,56 @@ pub async fn start( } } +impl ScanTask { + /// Accepts the scan task's `parsed_key` collection and a reference to the command channel receiver + /// + /// Processes messages in the scan task channel, updating `parsed_keys` if required. 
+ /// + /// Returns the updated `parsed_keys` + fn process_msgs( + cmd_receiver: &Receiver, + mut parsed_keys: Arc< + HashMap, Vec)>, + >, + ) -> Result< + Arc, Vec)>>, + Report, + > { + loop { + let cmd = match cmd_receiver.try_recv() { + Ok(cmd) => cmd, + + Err(TryRecvError::Empty) => break, + Err(TryRecvError::Disconnected) => { + // Return early if the sender has been dropped. + return Err(eyre!("command channel disconnected")); + } + }; + + match cmd { + ScanTaskCommand::RemoveKeys { done_tx, keys } => { + // TODO: Replace with Arc::unwrap_or_clone() when it stabilises: + // https://github.com/rust-lang/rust/issues/93610 + let mut updated_parsed_keys = + Arc::try_unwrap(parsed_keys).unwrap_or_else(|arc| (*arc).clone()); + + for key in keys { + updated_parsed_keys.remove(&key); + } + + parsed_keys = Arc::new(updated_parsed_keys); + + // Ignore send errors for the done notification + let _ = done_tx.send(()); + } + + _ => continue, + } + } + + Ok(parsed_keys) + } +} /// Get the block at `height` from `state`, scan it with the keys in `parsed_keys`, and store the /// results in `storage`. If `height` is lower than the `key_birthdays` for that key, skip it. /// @@ -288,8 +345,9 @@ pub fn scan_block( /// Currently only accepts extended full viewing keys, and returns both their diversifiable full /// viewing key and their individual viewing key, for testing purposes. 
/// -/// TODO: work out what string format is used for SaplingIvk, if any, and support it here -/// performance: stop returning both the dfvk and ivk for the same key +// TODO: work out what string format is used for SaplingIvk, if any, and support it here +// performance: stop returning both the dfvk and ivk for the same key +// TODO: use `ViewingKey::parse` from zebra-chain instead pub fn sapling_key_to_scan_block_keys( sapling_key: &SaplingScanningKey, network: Network, @@ -444,11 +502,12 @@ pub fn spawn_init( network: Network, state: State, chain_tip_change: ChainTipChange, + cmd_receiver: Receiver, ) -> JoinHandle> { let config = config.clone(); // TODO: spawn an entirely new executor here, to avoid timing attacks. - tokio::spawn(init(config, network, state, chain_tip_change).in_current_span()) + tokio::spawn(init(config, network, state, chain_tip_change, cmd_receiver).in_current_span()) } /// Initialize the scanner based on its config. @@ -459,11 +518,12 @@ pub async fn init( network: Network, state: State, chain_tip_change: ChainTipChange, + cmd_receiver: Receiver, ) -> Result<(), Report> { let storage = tokio::task::spawn_blocking(move || Storage::new(&config, network, false)) .wait_for_panics() .await; // TODO: add more tasks here? - start(state, chain_tip_change, storage).await + start(state, chain_tip_change, storage, cmd_receiver).await } diff --git a/zebra-scan/src/service.rs b/zebra-scan/src/service.rs index dc80d0832ae..970e52f9db5 100644 --- a/zebra-scan/src/service.rs +++ b/zebra-scan/src/service.rs @@ -1,25 +1,32 @@ //! [`tower::Service`] for zebra-scan. 
-use std::{future::Future, pin::Pin, task::Poll}; +use std::{collections::BTreeMap, future::Future, pin::Pin, task::Poll, time::Duration}; use futures::future::FutureExt; use tower::Service; -use zebra_chain::parameters::Network; +use zebra_chain::{parameters::Network, transaction::Hash}; + use zebra_state::ChainTipChange; use crate::{init::ScanTask, scan, storage::Storage, Config, Request, Response}; +#[cfg(test)] +mod tests; + /// Zebra-scan [`tower::Service`] #[derive(Debug)] pub struct ScanService { /// On-disk storage - db: Storage, + pub db: Storage, /// Handle to scan task that's responsible for writing results scan_task: ScanTask, } +/// A timeout applied to `DeleteKeys` requests. +const DELETE_KEY_TIMEOUT: Duration = Duration::from_secs(15); + impl ScanService { /// Create a new [`ScanService`]. pub fn new( @@ -35,11 +42,15 @@ impl ScanService { } /// Create a new [`ScanService`] with a mock `ScanTask` - pub fn new_with_mock_scanner(config: &Config, network: Network) -> Self { - Self { - db: Storage::new(config, network, false), - scan_task: ScanTask::mock(), - } + #[cfg(any(test, feature = "proptest-impl"))] + pub fn new_with_mock_scanner( + db: Storage, + ) -> ( + Self, + std::sync::mpsc::Receiver, + ) { + let (scan_task, cmd_receiver) = ScanTask::mock(); + (Self { db, scan_task }, cmd_receiver) } } @@ -62,6 +73,10 @@ impl Service for ScanService { } fn call(&mut self, req: Request) -> Self::Future { + if let Err(error) = req.check() { + return async move { Err(error) }.boxed(); + } + match req { Request::Info => { let db = self.db.clone(); @@ -84,25 +99,79 @@ impl Service for ScanService { // - send new keys to scan task } - Request::DeleteKeys(_key_hashes) => { - // TODO: - // - delete these keys and their results from db - // - send deleted keys to scan task + Request::DeleteKeys(keys) => { + let mut db = self.db.clone(); + let mut scan_task = self.scan_task.clone(); + + return async move { + // Wait for a message to confirm that the scan task has 
removed the key up to `DELETE_KEY_TIMEOUT` + let remove_keys_result = + tokio::time::timeout(DELETE_KEY_TIMEOUT, scan_task.remove_keys(&keys)?) + .await + .map_err(|_| "timeout waiting for delete keys done notification"); + + // Delete the key from the database after either confirmation that it's been removed from the scan task, or + // waiting `DELETE_KEY_TIMEOUT`. + let delete_key_task = tokio::task::spawn_blocking(move || { + db.delete_sapling_keys(keys); + }); + + // Return timeout errors or `RecvError`s, or wait for the key to be deleted from the database. + remove_keys_result??; + delete_key_task.await?; + + Ok(Response::DeletedKeys) + } + .boxed(); } - Request::Results(_key_hashes) => { - // TODO: read results from db + Request::Results(keys) => { + let db = self.db.clone(); + + return async move { + let mut final_result = BTreeMap::new(); + for key in keys { + let db = db.clone(); + let mut heights_and_transactions = BTreeMap::new(); + let txs = { + let key = key.clone(); + tokio::task::spawn_blocking(move || db.sapling_results_for_key(&key)) + } + .await?; + txs.iter().for_each(|(k, v)| { + heights_and_transactions + .entry(*k) + .or_insert_with(Vec::new) + .extend(v.iter().map(|x| Hash::from(*x))); + }); + final_result.entry(key).or_insert(heights_and_transactions); + } + + Ok(Response::Results(final_result)) + } + .boxed(); } Request::SubscribeResults(_key_hashes) => { // TODO: send key_hashes and mpsc::Sender to scanner task, return mpsc::Receiver to caller } - Request::ClearResults(_key_hashes) => { - // TODO: clear results for these keys from db + Request::ClearResults(keys) => { + let mut db = self.db.clone(); + + return async move { + // Clear results from db for the provided `keys` + tokio::task::spawn_blocking(move || { + db.delete_sapling_results(keys); + }) + .await?; + + Ok(Response::ClearedResults) + } + .boxed(); } } - async move { Ok(Response::Results(vec![])) }.boxed() + async move { Ok(Response::Results(BTreeMap::new())) }.boxed() } } diff 
--git a/zebra-scan/src/service/tests.rs b/zebra-scan/src/service/tests.rs new file mode 100644 index 00000000000..6d29d00d209 --- /dev/null +++ b/zebra-scan/src/service/tests.rs @@ -0,0 +1,210 @@ +//! Tests for ScanService. + +use tower::{Service, ServiceExt}; + +use color_eyre::{eyre::eyre, Result}; + +use zebra_chain::{block::Height, parameters::Network}; +use zebra_node_services::scan_service::{request::Request, response::Response}; +use zebra_state::TransactionIndex; + +use crate::{ + init::ScanTaskCommand, + service::ScanService, + storage::db::tests::{fake_sapling_results, new_test_storage}, + tests::ZECPAGES_SAPLING_VIEWING_KEY, +}; + +/// Tests that keys are deleted correctly +#[tokio::test] +pub async fn scan_service_deletes_keys_correctly() -> Result<()> { + let mut db = new_test_storage(Network::Mainnet); + + let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string(); + + for fake_result_height in [Height::MIN, Height(1), Height::MAX] { + db.insert_sapling_results( + &zec_pages_sapling_efvk, + fake_result_height, + fake_sapling_results([ + TransactionIndex::MIN, + TransactionIndex::from_index(40), + TransactionIndex::MAX, + ]), + ); + } + + assert!( + !db.sapling_results(&zec_pages_sapling_efvk).is_empty(), + "there should be some results for this key in the db" + ); + + let (mut scan_service, cmd_receiver) = ScanService::new_with_mock_scanner(db); + + let response_fut = scan_service + .ready() + .await + .map_err(|err| eyre!(err))? 
+ .call(Request::DeleteKeys(vec![zec_pages_sapling_efvk.clone()])); + + let expected_keys = vec![zec_pages_sapling_efvk.clone()]; + let cmd_handler_fut = tokio::task::spawn_blocking(move || { + let Ok(ScanTaskCommand::RemoveKeys { done_tx, keys }) = cmd_receiver.recv() else { + panic!("should successfully receive RemoveKeys message"); + }; + + assert_eq!(keys, expected_keys, "keys should match the request keys"); + + done_tx.send(()).expect("send should succeed"); + }); + + // Poll futures + let (response, join_result) = tokio::join!(response_fut, cmd_handler_fut); + join_result?; + + match response.map_err(|err| eyre!(err))? { + Response::DeletedKeys => {} + _ => panic!("scan service returned unexpected response variant"), + }; + + assert!( + scan_service + .db + .sapling_results(&zec_pages_sapling_efvk) + .is_empty(), + "all results for this key should have been deleted" + ); + + Ok(()) +} + +/// Tests that results are cleared correctly +#[tokio::test] +pub async fn scan_service_clears_results_correctly() -> Result<()> { + let mut db = new_test_storage(Network::Mainnet); + + let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string(); + + for fake_result_height in [Height::MIN, Height(1), Height::MAX] { + db.insert_sapling_results( + &zec_pages_sapling_efvk, + fake_result_height, + fake_sapling_results([ + TransactionIndex::MIN, + TransactionIndex::from_index(40), + TransactionIndex::MAX, + ]), + ); + } + + assert!( + !db.sapling_results(&zec_pages_sapling_efvk).is_empty(), + "there should be some results for this key in the db" + ); + + let (mut scan_service, _cmd_receiver) = ScanService::new_with_mock_scanner(db.clone()); + + let response = scan_service + .ready() + .await + .map_err(|err| eyre!(err))?
+ .call(Request::ClearResults(vec![zec_pages_sapling_efvk.clone()])) + .await + .map_err(|err| eyre!(err))?; + + match response { + Response::ClearedResults => {} + _ => panic!("scan service returned unexpected response variant"), + }; + + assert_eq!( + db.sapling_results(&zec_pages_sapling_efvk).len(), + 1, + "all results for this key should have been deleted, one empty entry should remain" + ); + + for (_, result) in db.sapling_results(&zec_pages_sapling_efvk) { + assert!( + result.is_empty(), + "there should be no results for this entry in the db" + ); + } + + Ok(()) +} + +/// Tests that results for key are returned correctly +#[tokio::test] +pub async fn scan_service_get_results_for_key_correctly() -> Result<()> { + let mut db = new_test_storage(Network::Mainnet); + + let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string(); + + for fake_result_height in [Height::MIN, Height(1), Height::MAX] { + db.insert_sapling_results( + &zec_pages_sapling_efvk, + fake_result_height, + fake_sapling_results([ + TransactionIndex::MIN, + TransactionIndex::from_index(40), + TransactionIndex::MAX, + ]), + ); + } + + assert!( + db.sapling_results(&zec_pages_sapling_efvk).len() == 3, + "there should be 3 heights for this key in the db" + ); + + for (_height, transactions) in db.sapling_results(&zec_pages_sapling_efvk) { + assert!( + transactions.len() == 3, + "there should be 3 transactions for each height for this key in the db" + ); + } + + // We don't need to send any command to the scanner for this call. + let (mut scan_service, _cmd_receiver) = ScanService::new_with_mock_scanner(db); + + let response_fut = scan_service + .ready() + .await + .map_err(|err| eyre!(err))? + .call(Request::Results(vec![zec_pages_sapling_efvk.clone()])); + + match response_fut.await.map_err(|err| eyre!(err))? 
{ + Response::Results(results) => { + assert!( + results.contains_key(&zec_pages_sapling_efvk), + "results should contain the requested key" + ); + assert!(results.len() == 1, "values are only for 1 key"); + + assert!( + results + .get_key_value(&zec_pages_sapling_efvk) + .unwrap() + .1 + .len() + == 3, + "we should have 3 heights for the given key " + ); + + for transactions in results + .get_key_value(&zec_pages_sapling_efvk) + .unwrap() + .1 + .values() + { + assert!( + transactions.len() == 3, + "there should be 3 transactions for each height for this key" + ); + } + } + _ => panic!("scan service returned unexpected response variant"), + }; + + Ok(()) +} diff --git a/zebra-scan/src/storage/db/sapling.rs b/zebra-scan/src/storage/db/sapling.rs index b7f1f0b40e3..e40a2b132c2 100644 --- a/zebra-scan/src/storage/db/sapling.rs +++ b/zebra-scan/src/storage/db/sapling.rs @@ -233,6 +233,31 @@ impl Storage { .write_batch() .expect("unexpected database write failure"); } + + /// Delete the sapling keys and their results, if they exist, + pub(crate) fn delete_sapling_keys(&mut self, keys: Vec) { + self.sapling_tx_ids_cf() + .new_batch_for_writing() + .delete_sapling_keys(keys) + .write_batch() + .expect("unexpected database write failure"); + } + + /// Delete the results of sapling scanning `keys`, if they exist + pub(crate) fn delete_sapling_results(&mut self, keys: Vec) { + let mut batch = self + .sapling_tx_ids_cf() + .new_batch_for_writing() + .delete_sapling_keys(keys.clone()); + + for key in &keys { + batch = batch.insert_sapling_height(key, Height::MIN); + } + + batch + .write_batch() + .expect("unexpected database write failure"); + } } /// Utility trait for inserting sapling heights into a WriteSaplingTxIdsBatch. @@ -252,3 +277,25 @@ impl<'cf> InsertSaplingHeight for WriteSaplingTxIdsBatch<'cf> { self.zs_insert(&index, &None) } } + +/// Utility trait for deleting sapling keys in a WriteSaplingTxIdsBatch. 
+trait DeleteSaplingKeys { + fn delete_sapling_keys(self, sapling_key: Vec) -> Self; +} + +impl<'cf> DeleteSaplingKeys for WriteSaplingTxIdsBatch<'cf> { + /// Delete sapling keys and their results. + fn delete_sapling_keys(mut self, sapling_keys: Vec) -> Self { + for key in &sapling_keys { + let from_index = SaplingScannedDatabaseIndex::min_for_key(key); + let until_strictly_before_index = SaplingScannedDatabaseIndex::max_for_key(key); + + self = self + .zs_delete_range(&from_index, &until_strictly_before_index) + // TODO: convert zs_delete_range() to take std::ops::RangeBounds + .zs_delete(&until_strictly_before_index); + } + + self + } +} diff --git a/zebra-scan/src/storage/db/tests.rs b/zebra-scan/src/storage/db/tests.rs index 765ec77e6ea..f34650ab262 100644 --- a/zebra-scan/src/storage/db/tests.rs +++ b/zebra-scan/src/storage/db/tests.rs @@ -6,8 +6,9 @@ use zebra_chain::{ block::{Block, Height}, parameters::Network::{self, *}, serialization::ZcashDeserializeInto, + transaction, }; -use zebra_state::TransactionIndex; +use zebra_state::{SaplingScannedResult, TransactionIndex}; use crate::{ storage::{Storage, INSERT_CONTROL_INTERVAL}, @@ -18,6 +19,9 @@ use crate::{ #[cfg(test)] mod snapshot; +#[cfg(test)] +mod vectors; + /// Returns an empty `Storage` suitable for testing. 
pub fn new_test_storage(network: Network) -> Storage { Storage::new(&Config::ephemeral(), network, false) @@ -74,3 +78,19 @@ pub fn add_fake_results( ); } } + +/// Accepts an iterator of [`TransactionIndex`]es and returns a `BTreeMap` with empty results +pub fn fake_sapling_results>( + transaction_indexes: T, +) -> BTreeMap { + let mut fake_sapling_results = BTreeMap::new(); + + for transaction_index in transaction_indexes { + fake_sapling_results.insert( + transaction_index, + SaplingScannedResult::from(transaction::Hash::from([0; 32])), + ); + } + + fake_sapling_results +} diff --git a/zebra-scan/src/storage/db/tests/vectors.rs b/zebra-scan/src/storage/db/tests/vectors.rs new file mode 100644 index 00000000000..f02165d7111 --- /dev/null +++ b/zebra-scan/src/storage/db/tests/vectors.rs @@ -0,0 +1,133 @@ +//! Fixed test vectors for the scanner Storage. + +use zebra_chain::{block::Height, parameters::Network}; +use zebra_state::TransactionIndex; + +use crate::{ + storage::db::tests::{fake_sapling_results, new_test_storage}, + tests::ZECPAGES_SAPLING_VIEWING_KEY, +}; + +/// Tests that keys are deleted correctly +#[test] +pub fn deletes_keys_and_results_correctly() { + let mut db = new_test_storage(Network::Mainnet); + + let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string(); + + // Replace the last letter of the zec_pages efvk + let fake_efvk = format!( + "{}t", + &ZECPAGES_SAPLING_VIEWING_KEY[..ZECPAGES_SAPLING_VIEWING_KEY.len() - 1] + ); + + let efvks = [&zec_pages_sapling_efvk, &fake_efvk]; + let fake_heights = [Height::MIN, Height(1), Height::MAX]; + let fake_transaction_indexes = [ + TransactionIndex::MIN, + TransactionIndex::from_index(40), + TransactionIndex::MAX, + ]; + + for efvk in efvks { + for fake_result_height in fake_heights { + db.insert_sapling_results( + efvk, + fake_result_height, + fake_sapling_results(fake_transaction_indexes), + ); + } + } + + let expected_num_entries = fake_heights.len(); + let expected_num_results_per_entry = 
fake_transaction_indexes.len(); + + for efvk in efvks { + assert_eq!( + db.sapling_results(efvk).len(), + expected_num_entries, + "there should be {expected_num_entries} entries for this key in the db" + ); + + for (_, result) in db.sapling_results(efvk) { + assert_eq!( + result.len(), + expected_num_results_per_entry, + "there should be {expected_num_results_per_entry} results for this entry in the db" + ); + } + + db.delete_sapling_keys(vec![efvk.clone()]); + + assert!( + db.sapling_results(efvk).is_empty(), + "all results for this key should have been deleted" + ); + } +} + +/// Tests that results are cleared correctly +#[test] +pub fn clears_results_correctly() { + let mut db = new_test_storage(Network::Mainnet); + + let zec_pages_sapling_efvk = ZECPAGES_SAPLING_VIEWING_KEY.to_string(); + + // Replace the last letter of the zec_pages efvk + let fake_efvk = format!( + "{}t", + &ZECPAGES_SAPLING_VIEWING_KEY[..ZECPAGES_SAPLING_VIEWING_KEY.len() - 1] + ); + + let efvks = [&zec_pages_sapling_efvk, &fake_efvk]; + let fake_heights = [Height::MIN, Height(1), Height::MAX]; + let fake_transaction_indexes = [ + TransactionIndex::MIN, + TransactionIndex::from_index(40), + TransactionIndex::MAX, + ]; + + for efvk in efvks { + for fake_result_height in fake_heights { + db.insert_sapling_results( + efvk, + fake_result_height, + fake_sapling_results(fake_transaction_indexes), + ); + } + } + + let expected_num_entries = fake_heights.len(); + let expected_num_results_per_entry = fake_transaction_indexes.len(); + + for efvk in efvks { + assert_eq!( + db.sapling_results(efvk).len(), + expected_num_entries, + "there should be {expected_num_entries} entries for this key in the db" + ); + + for (_, result) in db.sapling_results(efvk) { + assert_eq!( + result.len(), + expected_num_results_per_entry, + "there should be {expected_num_results_per_entry} results for this entry in the db" + ); + } + + db.delete_sapling_results(vec![efvk.clone()]); + + assert_eq!( + 
db.sapling_results(efvk).len(), + 1, + "all results for this key should have been deleted, one empty entry should remain" + ); + + for (_, result) in db.sapling_results(efvk) { + assert!( + result.is_empty(), + "there should be no results for this entry in the db" + ); + } + } +} diff --git a/zebra-state/Cargo.toml b/zebra-state/Cargo.toml index 628e73e826a..aaeaec4b8f2 100644 --- a/zebra-state/Cargo.toml +++ b/zebra-state/Cargo.toml @@ -47,14 +47,14 @@ elasticsearch = [ [dependencies] bincode = "1.3.3" -chrono = { version = "0.4.32", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.33", default-features = false, features = ["clock", "std"] } dirs = "5.0.1" futures = "0.3.30" hex = "0.4.3" hex-literal = "0.4.1" humantime-serde = "1.1.1" -indexmap = "2.0.1" -itertools = "0.12.0" +indexmap = "2.2.1" +itertools = "0.12.1" lazy_static = "1.4.0" metrics = "0.22.0" mset = "0.1.1" @@ -62,7 +62,7 @@ regex = "1.10.3" rlimit = "0.10.1" rocksdb = { version = "0.21.0", default-features = false, features = ["lz4"] } semver = "1.0.21" -serde = { version = "1.0.193", features = ["serde_derive"] } +serde = { version = "1.0.196", features = ["serde_derive"] } tempfile = "3.9.0" thiserror = "1.0.56" @@ -74,7 +74,7 @@ tracing = "0.1.39" # elasticsearch specific dependencies. 
# Security: avoid default dependency on openssl elasticsearch = { version = "8.5.0-alpha.1", default-features = false, features = ["rustls-tls"], optional = true } -serde_json = { version = "1.0.111", package = "serde_json", optional = true } +serde_json = { version = "1.0.113", package = "serde_json", optional = true } zebra-chain = { path = "../zebra-chain", version = "1.0.0-beta.34", features = ["async-error"] } diff --git a/zebra-test/Cargo.toml b/zebra-test/Cargo.toml index edc0e99c0af..1acc7c3933b 100644 --- a/zebra-test/Cargo.toml +++ b/zebra-test/Cargo.toml @@ -16,10 +16,10 @@ categories = ["command-line-utilities", "cryptography::cryptocurrencies"] [dependencies] hex = "0.4.3" -indexmap = "2.0.1" +indexmap = "2.2.1" lazy_static = "1.4.0" insta = "1.33.0" -itertools = "0.12.0" +itertools = "0.12.1" proptest = "1.4.0" once_cell = "1.18.0" rand = "0.8.5" diff --git a/zebra-utils/Cargo.toml b/zebra-utils/Cargo.toml index e577e3a7e98..ee9ae067562 100644 --- a/zebra-utils/Cargo.toml +++ b/zebra-utils/Cargo.toml @@ -82,7 +82,7 @@ tinyvec = { version = "1.6.0", features = ["rustc_1_55"] } structopt = "0.3.26" hex = "0.4.3" -serde_json = "1.0.111" +serde_json = "1.0.113" tracing-error = "0.2.0" tracing-subscriber = "0.3.18" thiserror = "1.0.56" @@ -95,7 +95,7 @@ zebra-scan = { path = "../zebra-scan", version = "0.1.0-alpha.3", optional = tru zebra-rpc = { path = "../zebra-rpc", version = "1.0.0-beta.34", optional = true } # These crates are needed for the zebra-checkpoints binary -itertools = { version = "0.12.0", optional = true } +itertools = { version = "0.12.1", optional = true } # These crates are needed for the search-issue-refs binary regex = { version = "1.10.3", optional = true } diff --git a/zebrad/Cargo.toml b/zebrad/Cargo.toml index a16e75132e9..9a9cad3071b 100644 --- a/zebrad/Cargo.toml +++ b/zebrad/Cargo.toml @@ -173,12 +173,12 @@ zebra-utils = { path = "../zebra-utils", version = "1.0.0-beta.34", optional = t abscissa_core = "0.7.0" clap = { version 
= "4.4.16", features = ["cargo"] } -chrono = { version = "0.4.32", default-features = false, features = ["clock", "std"] } +chrono = { version = "0.4.33", default-features = false, features = ["clock", "std"] } humantime-serde = "1.1.1" -indexmap = "2.0.1" +indexmap = "2.2.1" lazy_static = "1.4.0" semver = "1.0.21" -serde = { version = "1.0.193", features = ["serde_derive"] } +serde = { version = "1.0.196", features = ["serde_derive"] } toml = "0.8.8" futures = "0.3.30" @@ -186,7 +186,7 @@ rayon = "1.8.1" tokio = { version = "1.35.1", features = ["time", "rt-multi-thread", "macros", "tracing", "signal"] } tokio-stream = { version = "0.1.14", features = ["time"] } tower = { version = "0.4.13", features = ["hedge", "limit"] } -pin-project = "1.1.3" +pin-project = "1.1.4" color-eyre = { version = "0.6.2", default-features = false, features = ["issue-url"] } # This is a transitive dependency via color-eyre. @@ -261,7 +261,7 @@ regex = "1.10.3" insta = { version = "1.33.0", features = ["json"] } # zebra-rpc needs the preserve_order feature, it also makes test results more stable -serde_json = { version = "1.0.111", features = ["preserve_order"] } +serde_json = { version = "1.0.113", features = ["preserve_order"] } tempfile = "3.9.0" hyper = { version = "0.14.28", features = ["http1", "http2", "server"]} diff --git a/zebrad/src/commands/start.rs b/zebrad/src/commands/start.rs index e9b98cc523d..bbc8c69833b 100644 --- a/zebrad/src/commands/start.rs +++ b/zebrad/src/commands/start.rs @@ -301,18 +301,25 @@ impl StartCmd { #[cfg(feature = "shielded-scan")] // Spawn never ending scan task only if we have keys to scan for. 
- let scan_task_handle = if !config.shielded_scan.sapling_keys_to_scan.is_empty() { - // TODO: log the number of keys and update the scan_task_starts() test - info!("spawning shielded scanner with configured viewing keys"); - zebra_scan::spawn_init( - &config.shielded_scan, - config.network.network, - state, - chain_tip_change, - ) - } else { - tokio::spawn(std::future::pending().in_current_span()) - }; + let (scan_task_handle, _cmd_sender) = + if !config.shielded_scan.sapling_keys_to_scan.is_empty() { + // TODO: log the number of keys and update the scan_task_starts() test + info!("spawning shielded scanner with configured viewing keys"); + let scan_task = zebra_scan::ScanTask::spawn( + &config.shielded_scan, + config.network.network, + state, + chain_tip_change, + ); + + ( + std::sync::Arc::into_inner(scan_task.handle) + .expect("should only have one reference here"), + Some(scan_task.cmd_sender), + ) + } else { + (tokio::spawn(std::future::pending().in_current_span()), None) + }; #[cfg(not(feature = "shielded-scan"))] // Spawn a dummy scan task which doesn't do anything and never finishes.