diff --git a/.github/workflows/docker-reproducible.yml b/.github/workflows/docker-reproducible.yml new file mode 100644 index 00000000000..f3479e9468d --- /dev/null +++ b/.github/workflows/docker-reproducible.yml @@ -0,0 +1,176 @@ +name: docker-reproducible + +on: + push: + branches: + - unstable + - stable + tags: + - v* + workflow_dispatch: # allows manual triggering for testing purposes and skips publishing an image + +env: + DOCKER_REPRODUCIBLE_IMAGE_NAME: >- + ${{ github.repository_owner }}/lighthouse-reproducible + DOCKER_PASSWORD: ${{ secrets.DH_KEY }} + DOCKER_USERNAME: ${{ secrets.DH_ORG }} + +jobs: + extract-version: + name: extract version + runs-on: ubuntu-22.04 + steps: + - name: Extract version + run: | + if [[ "${{ github.ref }}" == refs/tags/* ]]; then + # It's a tag (e.g., v1.2.3) + VERSION="${GITHUB_REF#refs/tags/}" + elif [[ "${{ github.ref }}" == refs/heads/stable ]]; then + # stable branch -> latest + VERSION="latest" + elif [[ "${{ github.ref }}" == refs/heads/unstable ]]; then + # unstable branch -> latest-unstable + VERSION="latest-unstable" + else + # For manual triggers from other branches and will not publish any image + VERSION="test-build" + fi + echo "VERSION=$VERSION" >> $GITHUB_OUTPUT + id: extract_version + outputs: + VERSION: ${{ steps.extract_version.outputs.VERSION }} + + verify-and-build: + name: verify reproducibility and build + needs: extract-version + strategy: + matrix: + arch: [amd64, arm64] + include: + - arch: amd64 + rust_target: x86_64-unknown-linux-gnu + rust_image: >- + rust:1.88-bullseye@sha256:8e3c421122bf4cd3b2a866af41a4dd52d87ad9e315fd2cb5100e87a7187a9816 + platform: linux/amd64 + runner: ubuntu-22.04 + - arch: arm64 + rust_target: aarch64-unknown-linux-gnu + rust_image: >- + rust:1.88-bullseye@sha256:8b22455a7ce2adb1355067638284ee99d21cc516fab63a96c4514beaf370aa94 + platform: linux/arm64 + runner: ubuntu-22.04-arm + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v4 + + - name: Set up Docker 
Buildx + uses: docker/setup-buildx-action@v3 + with: + driver: docker + + - name: Verify reproducible builds (${{ matrix.arch }}) + run: | + # Build first image + docker build -f Dockerfile.reproducible \ + --platform ${{ matrix.platform }} \ + --build-arg RUST_TARGET="${{ matrix.rust_target }}" \ + --build-arg RUST_IMAGE="${{ matrix.rust_image }}" \ + -t lighthouse-verify-1-${{ matrix.arch }} . + + # Extract binary from first build + docker create --name extract-1-${{ matrix.arch }} lighthouse-verify-1-${{ matrix.arch }} + docker cp extract-1-${{ matrix.arch }}:/lighthouse ./lighthouse-1-${{ matrix.arch }} + docker rm extract-1-${{ matrix.arch }} + + # Clean state for second build + docker buildx prune -f + docker system prune -f + + # Build second image + docker build -f Dockerfile.reproducible \ + --platform ${{ matrix.platform }} \ + --build-arg RUST_TARGET="${{ matrix.rust_target }}" \ + --build-arg RUST_IMAGE="${{ matrix.rust_image }}" \ + -t lighthouse-verify-2-${{ matrix.arch }} . + + # Extract binary from second build + docker create --name extract-2-${{ matrix.arch }} lighthouse-verify-2-${{ matrix.arch }} + docker cp extract-2-${{ matrix.arch }}:/lighthouse ./lighthouse-2-${{ matrix.arch }} + docker rm extract-2-${{ matrix.arch }} + + # Compare binaries + echo "=== Comparing binaries ===" + echo "Build 1 SHA256: $(sha256sum lighthouse-1-${{ matrix.arch }})" + echo "Build 2 SHA256: $(sha256sum lighthouse-2-${{ matrix.arch }})" + + if cmp lighthouse-1-${{ matrix.arch }} lighthouse-2-${{ matrix.arch }}; then + echo "Reproducible build verified for ${{ matrix.arch }}" + else + echo "Reproducible build FAILED for ${{ matrix.arch }}" + echo "BLOCKING RELEASE: Builds are not reproducible!" 
+ echo "First 10 differences:" + cmp -l lighthouse-1-${{ matrix.arch }} lighthouse-2-${{ matrix.arch }} | head -10 + exit 1 + fi + + # Clean up verification artifacts but keep one image for publishing + rm -f lighthouse-*-${{ matrix.arch }} + docker rmi lighthouse-verify-1-${{ matrix.arch }} || true + + # Re-tag the second image for publishing (we verified it's identical to first) + VERSION=${{ needs.extract-version.outputs.VERSION }} + FINAL_TAG="${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${VERSION}-${{ matrix.arch }}" + docker tag lighthouse-verify-2-${{ matrix.arch }} "$FINAL_TAG" + + - name: Log in to Docker Hub + if: ${{ github.event_name != 'workflow_dispatch' }} + uses: docker/login-action@v3 + with: + username: ${{ env.DOCKER_USERNAME }} + password: ${{ env.DOCKER_PASSWORD }} + + - name: Push verified image (${{ matrix.arch }}) + if: ${{ github.event_name != 'workflow_dispatch' }} + run: | + VERSION=${{ needs.extract-version.outputs.VERSION }} + IMAGE_TAG="${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${VERSION}-${{ matrix.arch }}" + docker push "$IMAGE_TAG" + + - name: Clean up local images + run: | + docker rmi lighthouse-verify-2-${{ matrix.arch }} || true + VERSION=${{ needs.extract-version.outputs.VERSION }} + docker rmi "${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }}:${VERSION}-${{ matrix.arch }}" || true + + - name: Upload verification artifacts (on failure) + if: failure() + uses: actions/upload-artifact@v4 + with: + name: verification-failure-${{ matrix.arch }} + path: | + lighthouse-*-${{ matrix.arch }} + + create-manifest: + name: create multi-arch manifest + runs-on: ubuntu-22.04 + needs: [extract-version, verify-and-build] + if: ${{ github.event_name != 'workflow_dispatch' }} + steps: + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ env.DOCKER_USERNAME }} + password: ${{ env.DOCKER_PASSWORD }} + + - name: Create and push multi-arch manifest + run: | + IMAGE_NAME=${{ env.DOCKER_REPRODUCIBLE_IMAGE_NAME }} + VERSION=${{ 
needs.extract-version.outputs.VERSION }} + + # Create manifest for the version tag + docker manifest create \ + ${IMAGE_NAME}:${VERSION} \ + ${IMAGE_NAME}:${VERSION}-amd64 \ + ${IMAGE_NAME}:${VERSION}-arm64 + + docker manifest push ${IMAGE_NAME}:${VERSION} diff --git a/.github/workflows/local-testnet.yml b/.github/workflows/local-testnet.yml index c6f9c075dbd..9992273e0a7 100644 --- a/.github/workflows/local-testnet.yml +++ b/.github/workflows/local-testnet.yml @@ -14,7 +14,7 @@ concurrency: jobs: dockerfile-ubuntu: - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v5 @@ -31,7 +31,7 @@ jobs: retention-days: 3 run-local-testnet: - runs-on: ubuntu-22.04 + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} needs: dockerfile-ubuntu steps: - uses: actions/checkout@v5 @@ -89,7 +89,7 @@ jobs: ${{ steps.assertoor_test_result.outputs.failed_test_details }} EOF ) - + echo "Test Result: $test_result" echo "$test_status" if ! 
[ "$test_result" == "success" ]; then @@ -100,7 +100,7 @@ jobs: doppelganger-protection-success-test: needs: dockerfile-ubuntu - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 @@ -136,7 +136,7 @@ jobs: doppelganger-protection-failure-test: needs: dockerfile-ubuntu - runs-on: ubuntu-22.04 + runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 @@ -173,13 +173,13 @@ jobs: # Tests checkpoint syncing to a live network (current fork) and a running devnet (usually next scheduled fork) checkpoint-sync-test: name: checkpoint-sync-test-${{ matrix.network }} - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} needs: dockerfile-ubuntu if: contains(github.event.pull_request.labels.*.name, 'syncing') continue-on-error: true strategy: matrix: - network: [sepolia, devnet] + network: [sepolia] steps: - uses: actions/checkout@v5 @@ -216,7 +216,7 @@ jobs: # Test syncing from genesis on a local testnet. Aims to cover forward syncing both short and long distances. genesis-sync-test: name: genesis-sync-test-${{ matrix.fork }}-${{ matrix.offline_secs }}s - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} needs: dockerfile-ubuntu strategy: matrix: @@ -259,7 +259,7 @@ jobs: # a PR is safe to merge. New jobs should be added here. 
local-testnet-success: name: local-testnet-success - runs-on: ubuntu-latest + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} needs: [ 'dockerfile-ubuntu', 'run-local-testnet', @@ -272,4 +272,4 @@ jobs: - name: Check that success job is dependent on all others run: | exclude_jobs='checkpoint-sync-test' - ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success "$exclude_jobs" + ./scripts/ci/check-success-job.sh ./.github/workflows/local-testnet.yml local-testnet-success "$exclude_jobs" diff --git a/.github/workflows/nightly-tests.yml b/.github/workflows/nightly-tests.yml new file mode 100644 index 00000000000..be52c5b84d3 --- /dev/null +++ b/.github/workflows/nightly-tests.yml @@ -0,0 +1,135 @@ +# We only run tests on `RECENT_FORKS` on CI. To make sure we don't break prior forks, we run nightly tests to cover all prior forks. +name: nightly-tests + +on: + schedule: + # Run at 8:30 AM UTC every day + - cron: '30 8 * * *' + workflow_dispatch: # Allow manual triggering + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +env: + # Deny warnings in CI + # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) + RUSTFLAGS: "-D warnings -C debuginfo=0" + # Prevent Github API rate limiting. + LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.LIGHTHOUSE_GITHUB_TOKEN }} + # Disable incremental compilation + CARGO_INCREMENTAL: 0 + # Enable portable to prevent issues with caching `blst` for the wrong CPU type + TEST_FEATURES: portable + +jobs: + setup-matrix: + name: setup-matrix + runs-on: ubuntu-latest + outputs: + forks: ${{ steps.set-matrix.outputs.forks }} + steps: + - name: Set matrix + id: set-matrix + run: | + # All prior forks to cover in nightly tests. This list should be updated when we remove a fork from `RECENT_FORKS`. 
+ echo 'forks=["phase0", "altair", "bellatrix", "capella", "deneb"]' >> $GITHUB_OUTPUT + + beacon-chain-tests: + name: beacon-chain-tests + needs: setup-matrix + runs-on: 'ubuntu-latest' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + fork: ${{ fromJson(needs.setup-matrix.outputs.forks) }} + fail-fast: false + steps: + - uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run beacon_chain tests for ${{ matrix.fork }} + run: make test-beacon-chain-${{ matrix.fork }} + timeout-minutes: 60 + + http-api-tests: + name: http-api-tests + needs: setup-matrix + runs-on: 'ubuntu-latest' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + fork: ${{ fromJson(needs.setup-matrix.outputs.forks) }} + fail-fast: false + steps: + - uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run http_api tests for ${{ matrix.fork }} + run: make test-http-api-${{ matrix.fork }} + timeout-minutes: 60 + + op-pool-tests: + name: op-pool-tests + needs: setup-matrix + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + fork: ${{ fromJson(needs.setup-matrix.outputs.forks) }} + fail-fast: false + steps: + - uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Run operation_pool tests for ${{ matrix.fork }} + run: make test-op-pool-${{ matrix.fork }} + timeout-minutes: 60 + + network-tests: + name: network-tests + needs: setup-matrix + runs-on: ubuntu-latest + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + strategy: + matrix: + fork: ${{ fromJson(needs.setup-matrix.outputs.forks) }} + fail-fast: false + steps: + - 
uses: actions/checkout@v5 + - name: Get latest version of stable Rust + uses: moonrepo/setup-rust@v1 + with: + channel: stable + cache-target: release + bins: cargo-nextest + - name: Create CI logger dir + run: mkdir ${{ runner.temp }}/network_test_logs + - name: Run network tests for ${{ matrix.fork }} + run: make test-network-${{ matrix.fork }} + timeout-minutes: 60 + env: + TEST_FEATURES: portable + CI_LOGGER_DIR: ${{ runner.temp }}/network_test_logs + - name: Upload logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: network_test_logs_${{ matrix.fork }} + path: ${{ runner.temp }}/network_test_logs diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0cdd8211da8..7344a9367b7 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -22,8 +22,6 @@ env: # NOTE: this token is a personal access token on Jimmy's account due to the default GITHUB_TOKEN # not having access to other repositories. We should eventually devise a better solution here. LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.LIGHTHOUSE_GITHUB_TOKEN }} - # Enable self-hosted runners for the sigp repo only. - SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} # Disable incremental compilation CARGO_INCREMENTAL: 0 # Enable portable to prevent issues with caching `blst` for the wrong CPU type @@ -78,8 +76,7 @@ jobs: name: release-tests-ubuntu needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v5 # Set Java version to 21. (required since Web3Signer 24.12.0). 
@@ -88,7 +85,6 @@ jobs: distribution: 'temurin' java-version: '21' - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -97,7 +93,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - name: Install Foundry (anvil) - if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d @@ -111,14 +106,12 @@ jobs: name: beacon-chain-tests needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -126,22 +119,16 @@ jobs: bins: cargo-nextest - name: Run beacon_chain tests for all known forks run: make test-beacon-chain - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats http-api-tests: name: http-api-tests needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the sigp repo. 
- runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -149,10 +136,6 @@ jobs: bins: cargo-nextest - name: Run http_api tests for all recent forks run: make test-http-api - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats op-pool-tests: name: op-pool-tests needs: [check-labels] @@ -220,29 +203,22 @@ jobs: name: debug-tests-ubuntu needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "large"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable bins: cargo-nextest - name: Install Foundry (anvil) - if: env.SELF_HOSTED_RUNNERS == 'false' uses: foundry-rs/foundry-toolchain@v1 with: version: nightly-ca67d15f4abd46394b324c50e21e66f306a1162d - name: Run tests in debug run: make test-debug - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats state-transition-vectors-ubuntu: name: state-transition-vectors-ubuntu needs: [check-labels] @@ -261,14 +237,12 @@ jobs: name: ef-tests-ubuntu needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - # Use self-hosted runners only on the 
sigp repo. - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -276,10 +250,6 @@ jobs: bins: cargo-nextest - name: Run consensus-spec-tests with blst and fake_crypto run: make test-ef - - name: Show cache stats - if: env.SELF_HOSTED_RUNNERS == 'true' - continue-on-error: true - run: sccache --show-stats basic-simulator-ubuntu: name: basic-simulator-ubuntu needs: [check-labels] @@ -328,11 +298,10 @@ jobs: name: execution-engine-integration-ubuntu needs: [check-labels] if: needs.check-labels.outputs.skip_ci != 'true' - runs-on: ${{ github.repository == 'sigp/lighthouse' && fromJson('["self-hosted", "linux", "CI", "small"]') || 'ubuntu-latest' }} + runs-on: ${{ github.repository == 'sigp/lighthouse' && 'warp-ubuntu-latest-x64-8x' || 'ubuntu-latest' }} steps: - uses: actions/checkout@v5 - name: Get latest version of stable Rust - if: env.SELF_HOSTED_RUNNERS == 'false' uses: moonrepo/setup-rust@v1 with: channel: stable @@ -340,9 +309,6 @@ jobs: cache: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Add go compiler to $PATH - if: env.SELF_HOSTED_RUNNERS == 'true' - run: echo "/usr/local/go/bin" >> $GITHUB_PATH - name: Run exec engine integration tests in release run: make test-exec-engine check-code: @@ -358,7 +324,7 @@ jobs: channel: stable cache-target: release components: rustfmt,clippy - bins: cargo-audit + bins: cargo-audit,cargo-deny - name: Check formatting with cargo fmt run: make cargo-fmt - name: Lint code for quality and style with Clippy @@ -371,6 +337,8 @@ jobs: run: make arbitrary-fuzz - name: Run cargo audit run: make audit-CI + - name: Run 
cargo deny + run: make deny-CI - name: Run cargo vendor to make sure dependencies can be vendored for packaging, reproducibility and archival purpose run: CARGO_HOME=$(readlink -f $HOME) make vendor - name: Markdown-linter diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000000..65447c4390a --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,5 @@ +{ + "rust-analyzer.cargo.cfgs": [ + "!debug_assertions" + ] +} diff --git a/Cargo.lock b/Cargo.lock index 9026b29e460..6ed7bfd0b60 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,7 +4,7 @@ version = 4 [[package]] name = "account_manager" -version = "0.3.5" +version = "8.0.1" dependencies = [ "account_utils", "bls", @@ -34,10 +34,11 @@ dependencies = [ name = "account_utils" version = "0.1.0" dependencies = [ + "bls", "eth2_keystore", "eth2_wallet", "filesystem", - "rand 0.9.0", + "rand 0.9.2", "regex", "rpassword", "serde", @@ -48,20 +49,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "addr2line" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" -dependencies = [ - "gimli", -] - [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -70,7 +62,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0" dependencies = [ "crypto-common", - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -83,7 +75,7 @@ dependencies = [ "cipher 0.3.0", "cpufeatures", "ctr 0.8.0", - "opaque-debug 0.3.1", + "opaque-debug", ] [[package]] @@ -113,21 +105,21 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.11" 
+version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", "once_cell", "version_check", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] @@ -138,21 +130,70 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" +[[package]] +name = "alloy-chains" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bc32535569185cbcb6ad5fa64d989a47bccb9a08e27284b1f2a3ccf16e6d010" +dependencies = [ + "alloy-primitives", + "num_enum", + "strum", +] + [[package]] name = "alloy-consensus" -version = "0.14.0" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2179ba839ac532f50279f5da2a6c5047f791f03f6f808b4dfab11327b97902f" +checksum = "2e318e25fb719e747a7e8db1654170fc185024f3ed5b10f86c08d448a912f6e2" dependencies = [ "alloy-eips", "alloy-primitives", "alloy-rlp", + "alloy-serde", "alloy-trie", - "auto_impl 1.2.1", + "alloy-tx-macros", + "auto_impl", + "borsh", + "c-kzg", "derive_more 2.0.1", "either", + "k256", "once_cell", - "thiserror 2.0.12", + "rand 0.8.5", + "secp256k1", + "serde", + "serde_json", + "serde_with", + "thiserror 2.0.17", +] + +[[package]] +name = "alloy-consensus-any" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "364380a845193a317bcb7a5398fc86cdb66c47ebe010771dde05f6869bf9e64a" +dependencies = [ + 
"alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-dyn-abi" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdff496dd4e98a81f4861e66f7eaf5f2488971848bb42d9c892f871730245c8" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-type-parser", + "alloy-sol-types", + "itoa", + "winnow", ] [[package]] @@ -164,83 +205,198 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "crc", - "thiserror 2.0.12", + "serde", + "thiserror 2.0.17", ] [[package]] name = "alloy-eip2930" -version = "0.2.0" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbe3e16484669964c26ac48390245d84c410b1a5f968976076c17184725ef235" +checksum = "9441120fa82df73e8959ae0e4ab8ade03de2aaae61be313fbf5746277847ce25" dependencies = [ "alloy-primitives", "alloy-rlp", + "borsh", + "serde", ] [[package]] name = "alloy-eip7702" -version = "0.6.0" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "804cefe429015b4244966c006d25bda5545fa9db5990e9c9079faf255052f50a" +checksum = "2919c5a56a1007492da313e7a3b6d45ef5edc5d33416fdec63c0d7a2702a0d20" dependencies = [ "alloy-primitives", "alloy-rlp", - "thiserror 2.0.12", + "borsh", + "serde", + "thiserror 2.0.17", ] [[package]] name = "alloy-eips" -version = "0.14.0" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "609515c1955b33af3d78d26357540f68c5551a90ef58fd53def04f2aa074ec43" +checksum = "a4c4d7c5839d9f3a467900c625416b24328450c65702eb3d8caff8813e4d1d33" dependencies = [ "alloy-eip2124", "alloy-eip2930", "alloy-eip7702", "alloy-primitives", "alloy-rlp", - "auto_impl 1.2.1", + "alloy-serde", + "auto_impl", + "borsh", + "c-kzg", "derive_more 2.0.1", "either", - "sha2 0.10.8", + "serde", + "serde_with", + "sha2 0.10.9", + "thiserror 2.0.17", +] + +[[package]] +name = "alloy-json-abi" 
+version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5513d5e6bd1cba6bdcf5373470f559f320c05c8c59493b6e98912fbe6733943f" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-json-rpc" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f72cf87cda808e593381fb9f005ffa4d2475552b7a6c5ac33d087bf77d82abd0" +dependencies = [ + "alloy-primitives", + "alloy-sol-types", + "http 1.3.1", + "serde", + "serde_json", + "thiserror 2.0.17", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12aeb37b6f2e61b93b1c3d34d01ee720207c76fe447e2a2c217e433ac75b17f5" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-json-rpc", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-types-any", + "alloy-rpc-types-eth", + "alloy-serde", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "auto_impl", + "derive_more 2.0.1", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror 2.0.17", +] + +[[package]] +name = "alloy-network-primitives" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abd29ace62872083e30929cd9b282d82723196d196db589f3ceda67edcc05552" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-primitives", + "alloy-serde", + "serde", ] [[package]] name = "alloy-primitives" -version = "1.0.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70b98b99c1dcfbe74d7f0b31433ff215e7d1555e367d90e62db904f3c9d4ff53" +checksum = "355bf68a433e0fd7f7d33d5a9fc2583fde70bf5c530f63b80845f8da5505cf28" dependencies = [ "alloy-rlp", "arbitrary", "bytes", "cfg-if", "const-hex", - "derive_arbitrary", "derive_more 2.0.1", - "foldhash", - "getrandom 0.3.1", - "hashbrown 0.15.2", - 
"indexmap 2.8.0", + "foldhash 0.2.0", + "getrandom 0.3.4", + "hashbrown 0.16.0", + "indexmap 2.12.0", "itoa", - "k256 0.13.4", + "k256", "keccak-asm", "paste", "proptest", "proptest-derive", - "rand 0.9.0", + "rand 0.9.2", "ruint", "rustc-hash 2.1.1", "serde", - "sha3 0.10.8", + "sha3", "tiny-keccak", ] +[[package]] +name = "alloy-provider" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b710636d7126e08003b8217e24c09f0cca0b46d62f650a841736891b1ed1fc1" +dependencies = [ + "alloy-chains", + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-signer", + "alloy-sol-types", + "alloy-transport", + "alloy-transport-http", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "either", + "futures", + "futures-utils-wasm", + "lru 0.13.0", + "parking_lot", + "pin-project", + "reqwest", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "wasmtimer", +] + [[package]] name = "alloy-rlp" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6c1d995bff8d011f7cd6c81820d51825e6e06d6db73914c1630ecf544d83d6" +checksum = "5f70d83b765fdc080dbcd4f4db70d8d23fe4761f2f02ebfa9146b833900634b4" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -249,35 +405,247 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.11" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40e1ef334153322fd878d07e86af7a529bcb86b2439525920a88eba87bcf943" +checksum = "64b728d511962dda67c1bc7ea7c03736ec275ed2cf4c35d9585298ac9ccf3b73" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", +] + +[[package]] +name = "alloy-rpc-client" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d0882e72d2c1c0c79dcf4ab60a67472d3f009a949f774d4c17d0bdb669cfde05" +dependencies = [ + "alloy-json-rpc", + "alloy-primitives", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "reqwest", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower 0.5.2", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-rpc-types-any" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a63fb40ed24e4c92505f488f9dd256e2afaed17faa1b7a221086ebba74f4122" +dependencies = [ + "alloy-consensus-any", + "alloy-rpc-types-eth", + "alloy-serde", +] + +[[package]] +name = "alloy-rpc-types-eth" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eae0c7c40da20684548cbc8577b6b7447f7bf4ddbac363df95e3da220e41e72" +dependencies = [ + "alloy-consensus", + "alloy-consensus-any", + "alloy-eips", + "alloy-network-primitives", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.14.0", + "serde", + "serde_json", + "serde_with", + "thiserror 2.0.17", +] + +[[package]] +name = "alloy-serde" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0df1987ed0ff2d0159d76b52e7ddfc4e4fbddacc54d2fbee765e0d14d7c01b5" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ff69deedee7232d7ce5330259025b868c5e6a52fa8dffda2c861fb3a5889b24" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "either", + "elliptic-curve", + "k256", + "thiserror 2.0.17", +] + +[[package]] +name = "alloy-signer-local" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72cfe0be3ec5a8c1a46b2e5a7047ed41121d360d97f4405bb7c1c784880c86cb" +dependencies = [ + "alloy-consensus", + "alloy-network", + 
"alloy-primitives", + "alloy-signer", + "async-trait", + "k256", + "rand 0.8.5", + "thiserror 2.0.17", +] + +[[package]] +name = "alloy-sol-macro" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3ce480400051b5217f19d6e9a82d9010cdde20f1ae9c00d53591e4a1afbb312" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d792e205ed3b72f795a8044c52877d2e6b6e9b1d13f431478121d8d4eaa9028" +dependencies = [ + "alloy-sol-macro-input", + "const-hex", + "heck", + "indexmap 2.12.0", + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.110", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bd1247a8f90b465ef3f1207627547ec16940c35597875cdc09c49d58b19693c" +dependencies = [ + "const-hex", + "dunce", + "heck", + "macro-string", + "proc-macro2", + "quote", + "syn 2.0.110", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "954d1b2533b9b2c7959652df3076954ecb1122a28cc740aa84e7b0a49f6ac0a9" +dependencies = [ + "serde", + "winnow", +] + +[[package]] +name = "alloy-sol-types" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70319350969a3af119da6fb3e9bddb1bce66c9ea933600cb297c8b1850ad2a3c" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "serde", +] + +[[package]] +name = "alloy-transport" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be98b07210d24acf5b793c99b759e9a696e4a2e67593aec0487ae3b3e1a2478c" +dependencies = [ + 
"alloy-json-rpc", + "auto_impl", + "base64 0.22.1", + "derive_more 2.0.1", + "futures", + "futures-utils-wasm", + "parking_lot", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "tower 0.5.2", + "tracing", + "url", + "wasmtimer", +] + +[[package]] +name = "alloy-transport-http" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4198a1ee82e562cab85e7f3d5921aab725d9bd154b6ad5017f82df1695877c97" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "reqwest", + "serde_json", + "tower 0.5.2", + "tracing", + "url", ] [[package]] name = "alloy-trie" -version = "0.8.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "983d99aa81f586cef9dae38443245e585840fcf0fc58b09aee0b1f27aed1d500" +checksum = "e3412d52bb97c6c6cc27ccc28d4e6e8cf605469101193b50b0bd5813b1f990b5" dependencies = [ "alloy-primitives", "alloy-rlp", "arrayvec", "derive_more 2.0.1", "nybbles", + "serde", "smallvec", "tracing", ] [[package]] -name = "android-tzdata" -version = "0.1.1" +name = "alloy-tx-macros" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" +checksum = "333544408503f42d7d3792bfc0f7218b643d968a03d2c0ed383ae558fb4a76d0" +dependencies = [ + "darling 0.21.3", + "proc-macro2", + "quote", + "syn 2.0.110", +] [[package]] name = "android_system_properties" @@ -296,9 +664,9 @@ checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" [[package]] name = "anstream" -version = "0.6.18" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -311,50 +679,50 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = 
"1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.2" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "anstyle-wincon" -version = "3.0.7" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3534e77181a9cc07539ad51f2141fe32f6c3ffd4df76db8ad92346b003ae4e" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", - "once_cell", - "windows-sys 0.59.0", + "once_cell_polyfill", + "windows-sys 0.61.2", ] [[package]] name = "anyhow" -version = "1.0.97" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "arbitrary" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" +checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" dependencies = [ "derive_arbitrary", ] @@ -412,6 +780,26 @@ dependencies = [ 
"zeroize", ] +[[package]] +name = "ark-ff" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a177aba0ed1e0fbb62aa9f6d0502e9b46dad8c2eab04c14258a1212d2557ea70" +dependencies = [ + "ark-ff-asm 0.5.0", + "ark-ff-macros 0.5.0", + "ark-serialize 0.5.0", + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "educe", + "itertools 0.13.0", + "num-bigint", + "num-traits", + "paste", + "zeroize", +] + [[package]] name = "ark-ff-asm" version = "0.3.0" @@ -432,6 +820,16 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-asm" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" +dependencies = [ + "quote", + "syn 2.0.110", +] + [[package]] name = "ark-ff-macros" version = "0.3.0" @@ -457,6 +855,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-ff-macros" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09be120733ee33f7693ceaa202ca41accd5653b779563608f1234f78ae07c4b3" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "ark-serialize" version = "0.3.0" @@ -478,6 +889,18 @@ dependencies = [ "num-bigint", ] +[[package]] +name = "ark-serialize" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f4d068aaf107ebcd7dfb52bc748f8030e0fc930ac8e360146ca54c1203088f7" +dependencies = [ + "ark-std 0.5.0", + "arrayvec", + "digest 0.10.7", + "num-bigint", +] + [[package]] name = "ark-std" version = "0.3.0" @@ -498,6 +921,16 @@ dependencies = [ "rand 0.8.5", ] +[[package]] +name = "ark-std" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "246a225cc6131e9ee4f24619af0f19d67761fff15d7ccc22e42b80846e69449a" +dependencies = [ + "num-traits", + "rand 0.8.5", +] + [[package]] name = "arraydeque" version = 
"0.5.1" @@ -515,6 +948,9 @@ name = "arrayvec" version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] [[package]] name = "asn1-rs" @@ -528,7 +964,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] @@ -540,7 +976,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "synstructure", ] @@ -552,7 +988,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -584,9 +1020,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "2.3.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +checksum = "924ed96dd52d1b75e9c1a3e6275715fd320f5f9439fb5a4a11fa51f4221158d2" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -596,32 +1032,20 @@ dependencies = [ [[package]] name = "async-io" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a2b323ccce0a1d90b449fd71f2a06ca7faa7c54c2751f06c9bd851fc061059" -dependencies = [ - "async-lock", - "cfg-if", - "concurrent-queue", - "futures-io", - "futures-lite", - "parking", - "polling", - "rustix 0.38.44", - "slab", - "tracing", - "windows-sys 0.59.0", -] - -[[package]] -name = "async-lock" -version = "3.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" dependencies = [ - "event-listener 5.4.0", - "event-listener-strategy", - "pin-project-lite", 
+ "autocfg", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix 1.1.2", + "slab", + "windows-sys 0.61.2", ] [[package]] @@ -643,29 +1067,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "async-trait" -version = "0.1.87" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d556ec1359574147ec0c4fc5eb525f3f23263a592b1a9c07e0a75b427de55c97" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "async_io_stream" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" -dependencies = [ - "futures", - "pharos", - "rustc_version 0.4.1", + "syn 2.0.110", ] [[package]] @@ -694,39 +1107,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" dependencies = [ "base64 0.22.1", - "http 1.3.0", + "http 1.3.1", "log", "url", ] [[package]] name = "auto_impl" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7862e21c893d65a1650125d157eaeec691439379a1cee17ee49031b79236ada4" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "auto_impl" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e12882f59de5360c748c4cbf569a042d5fb0eb515f7bea9c1f470b47f6ffbd73" +checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "axum" @@ -738,7 +1139,7 @@ dependencies = [ "axum-core", "bytes", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", "itoa", @@ -749,7 +1150,7 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower 0.5.2", "tower-layer", "tower-service", @@ -764,44 +1165,23 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", ] -[[package]] -name = "backtrace" -version = "0.3.74" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", - "windows-targets 0.52.6", -] - [[package]] name = "base-x" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -809,27 +1189,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" [[package]] -name = "base58" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" - -[[package]] -name = "base58check" 
-version = "0.1.0" +name = "base256emoji" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ee2fe4c9a0c84515f136aaae2466744a721af6d63339c18689d9e995d74d99b" +checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c" dependencies = [ - "base58", - "sha2 0.8.2", + "const-str", + "match-lookup", ] -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.1" @@ -850,19 +1218,19 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb97d56060ee67d285efb8001fec9d2a4c710c32efd2e14b5cbb5ba71930fc2d" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "beacon_chain" version = "0.2.0" dependencies = [ "alloy-primitives", - "bitvec 1.0.1", + "bitvec", "bls", "criterion", - "derivative", + "educe", "eth2", "eth2_network_config", "ethereum_hashing", @@ -870,6 +1238,7 @@ dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", "execution_layer", + "fixed_bytes", "fork_choice", "futures", "genesis", @@ -880,18 +1249,19 @@ dependencies = [ "lighthouse_tracing", "lighthouse_version", "logging", - "lru", + "lru 0.12.5", "maplit", "merkle_proof", "metrics", + "milhouse", "mockall", "mockall_double", "once_cell", "oneshot_broadcast", "operation_pool", - "parking_lot 0.12.3", + "parking_lot", "proto_array", - "rand 0.9.0", + "rand 0.9.2", "rayon", "safe_arith", "sensitive_url", @@ -912,6 +1282,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", "types", "zstd 0.13.3", ] @@ -922,6 +1293,7 @@ version = "8.0.1" dependencies = [ "account_utils", "beacon_chain", + "bls", "clap", "clap_utils", "client", 
@@ -933,7 +1305,7 @@ dependencies = [ "genesis", "hex", "http_api", - "hyper 1.6.0", + "hyper 1.8.1", "lighthouse_network", "monitoring_api", "network_utils", @@ -952,6 +1324,7 @@ dependencies = [ name = "beacon_node_fallback" version = "0.1.0" dependencies = [ + "bls", "clap", "eth2", "futures", @@ -979,7 +1352,7 @@ dependencies = [ "logging", "metrics", "num_cpus", - "parking_lot 0.12.3", + "parking_lot", "serde", "slot_clock", "strum", @@ -990,12 +1363,6 @@ dependencies = [ "types", ] -[[package]] -name = "bech32" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dabbe35f96fb9507f7330793dc490461b2962659ac5d427181e451a623751d1" - [[package]] name = "bincode" version = "1.3.3" @@ -1011,7 +1378,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -1024,7 +1391,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.100", + "syn 2.0.110", "which", ] @@ -1044,38 +1411,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" [[package]] -name = "bitflags" -version = "1.3.2" +name = "bitcoin-io" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +checksum = "0b47c4ab7a93edb0c7198c5535ed9b52b63095f4e9b45279c6736cec4b856baf" [[package]] -name = "bitflags" -version = "2.9.0" +name = "bitcoin_hashes" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", + "hex-conservative", +] 
[[package]] -name = "bitvec" -version = "0.17.4" +name = "bitflags" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" -dependencies = [ - "either", - "radium 0.3.0", -] +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] -name = "bitvec" -version = "0.20.4" +name = "bitflags" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" -dependencies = [ - "funty 1.1.0", - "radium 0.6.2", - "tap", - "wyz 0.2.0", -] +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bitvec" @@ -1083,10 +1444,10 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ - "funty 2.0.0", - "radium 0.7.0", + "funty", + "radium", "tap", - "wyz 0.5.1", + "wyz", ] [[package]] @@ -1098,26 +1459,13 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "block-buffer" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" -dependencies = [ - "block-padding 0.1.5", - "byte-tools", - "byteorder", - "generic-array 0.12.4", -] - [[package]] name = "block-buffer" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "block-padding 0.2.1", - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -1126,24 +1474,18 @@ version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] 
[[package]] -name = "block-padding" -version = "0.1.5" +name = "block2" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +checksum = "cdeb9d870516001442e364c5220d3574d2da8dc765554b4a617230d33fa58ef5" dependencies = [ - "byte-tools", + "objc2", ] -[[package]] -name = "block-padding" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d696c370c750c948ada61c69a0ee2cbbb9c50b1019ddb86d9317157a99c2cae" - [[package]] name = "bls" version = "0.2.0" @@ -1156,7 +1498,7 @@ dependencies = [ "ethereum_ssz", "fixed_bytes", "hex", - "rand 0.9.0", + "rand 0.9.2", "safe_arith", "serde", "tree_hash", @@ -1183,8 +1525,8 @@ checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" dependencies = [ "blst", "byte-slice-cast", - "ff 0.13.1", - "group 0.13.0", + "ff", + "group", "pairing", "rand_core 0.6.4", "serde", @@ -1213,6 +1555,29 @@ dependencies = [ "types", ] +[[package]] +name = "borsh" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce" +dependencies = [ + "borsh-derive", + "cfg_aliases", +] + +[[package]] +name = "borsh-derive" +version = "1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3" +dependencies = [ + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "bs58" version = "0.4.0" @@ -1232,11 +1597,13 @@ dependencies = [ name = "builder_client" version = "0.1.0" dependencies = [ + "bls", + "context_deserialize", "eth2", "ethereum_ssz", "lighthouse_version", "mockito", - "reqwest 0.11.27", + "reqwest", "sensitive_url", "serde", "serde_json", @@ -1245,9 +1612,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version 
= "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byte-slice-cast" @@ -1255,12 +1622,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" -[[package]] -name = "byte-tools" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" - [[package]] name = "byteorder" version = "1.5.0" @@ -1269,9 +1630,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" +checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" dependencies = [ "serde", ] @@ -1307,15 +1668,17 @@ dependencies = [ "glob", "hex", "libc", + "once_cell", + "serde", ] [[package]] name = "camino" -version = "1.1.9" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" +checksum = "276a59bf2b2c967788139340c9f0c5b12d7fd6630315c15c217e559de85d2609" dependencies = [ - "serde", + "serde_core", ] [[package]] @@ -1335,10 +1698,10 @@ checksum = "dd5eb614ed4c27c5d706420e4320fbe3216ab31fa1c33cd8246ac36dae4479ba" dependencies = [ "camino", "cargo-platform", - "semver 1.0.26", + "semver 1.0.27", "serde", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -1349,10 +1712,11 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.16" +version = "1.2.46" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "b97463e1064cb1b1c1384ad0a0b9c8abd0988e2a91f52606c80ef14aadb63e36" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -1369,9 +1733,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "cfg_aliases" @@ -1405,14 +1769,14 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", + "serde", "wasm-bindgen", "windows-link", ] @@ -1450,7 +1814,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -1477,9 +1841,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.32" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6088f3ae8c3608d19260cd7445411865a485688711b78b5be70d78cd96136f83" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive", @@ -1487,9 +1851,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.32" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a7ef7f676155edfb82daa97f99441f3ebf4a58d5e32f295a56259f1b6facc8" 
+checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ -1500,21 +1864,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d" [[package]] name = "clap_utils" @@ -1556,7 +1920,7 @@ dependencies = [ "monitoring_api", "network", "operation_pool", - "rand 0.9.0", + "rand 0.9.2", "sensitive_url", "serde", "serde_json", @@ -1584,68 +1948,11 @@ dependencies = [ "cc", ] -[[package]] -name = "coins-bip32" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634c509653de24b439672164bbf56f5f582a2ab0e313d3b0f6af0b7345cf2560" -dependencies = [ - "bincode", - "bs58 0.4.0", - "coins-core", - "digest 0.10.7", - "getrandom 0.2.15", - "hmac 0.12.1", - "k256 0.11.6", - "lazy_static", - "serde", - "sha2 0.10.8", - "thiserror 1.0.69", -] - -[[package]] -name = "coins-bip39" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a11892bcac83b4c6e95ab84b5b06c76d9d70ad73548dd07418269c5c7977171" -dependencies = [ - "bitvec 0.17.4", - "coins-bip32", - "getrandom 0.2.15", - "hex", - "hmac 0.12.1", - "pbkdf2 0.11.0", - "rand 0.8.5", - "sha2 0.10.8", - "thiserror 1.0.69", -] - -[[package]] -name = "coins-core" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c94090a6663f224feae66ab01e41a2555a8296ee07b5f20dab8888bdefc9f617" -dependencies = [ - "base58check", - "base64 0.12.3", - "bech32", - "blake2", - "digest 0.10.7", - "generic-array 0.14.7", - "hex", - "ripemd", - "serde", - "serde_derive", - "sha2 0.10.8", - "sha3 0.10.8", - "thiserror 1.0.69", -] - [[package]] name = "colorchoice" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" +checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] name = "colored" @@ -1658,15 +1965,19 @@ dependencies = [ [[package]] name = "compare_fields" -version = "0.2.0" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05162add7c8618791829528194a271dca93f69194d35b19db1ca7fbfb8275278" dependencies = [ "compare_fields_derive", - "itertools 0.10.5", + "itertools 0.14.0", ] [[package]] name = "compare_fields_derive" -version = "0.2.0" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f5ee468b2e568b668e2a686112935e7bbe9a81bf4fa6b9f6fc3410ea45fb7ce" dependencies = [ "quote", "syn 1.0.109", @@ -1722,15 +2033,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.14.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b0485bab839b018a8f1723fc5391819fea5f8f0f32288ef8a735fd096b6160c" +checksum = "3bb320cac8a0750d7f25280aa97b09c26edfe161164238ecbbb31092b079e735" dependencies = [ "cfg-if", "cpufeatures", - "hex", "proptest", - "serde", + "serde_core", ] [[package]] @@ -1739,11 +2049,17 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "const-str" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3" + [[package]] name = "const_format" -version = "0.2.34" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "126f97965c8ad46d6d9163268ff28432e8f6a1196a55578867832e3049df63dd" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" dependencies = [ "const_format_proc_macros", ] @@ -1767,22 +2083,21 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "context_deserialize" -version = "0.1.0" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5f9ea0a0ae2de4943f5ca71590b6dbd0b952475f0a0cafb30a470cec78c8b9" dependencies = [ "context_deserialize_derive", - "milhouse", "serde", - "ssz_types", ] [[package]] name = "context_deserialize_derive" -version = "0.1.0" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c57b2db1e4e3ed804dcc49894a144b68fe6c754b8f545eb1dda7ad3c7dbe7e6" dependencies = [ - "context_deserialize", "quote", - "serde", - "serde_json", "syn 1.0.109", ] @@ -1792,15 +2107,6 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" -[[package]] -name = "convert_case" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -1847,9 +2153,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" dependencies = [ 
"crc-catalog", ] @@ -1862,9 +2168,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -1947,21 +2253,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "crunchy" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" - -[[package]] -name = "crypto-bigint" -version = "0.4.9" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array 0.14.7", - "rand_core 0.6.4", - "subtle", - "zeroize", -] +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" [[package]] name = "crypto-bigint" @@ -1969,7 +2263,7 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "generic-array 0.14.7", + "generic-array", "rand_core 0.6.4", "subtle", "zeroize", @@ -1977,11 +2271,11 @@ dependencies = [ [[package]] name = "crypto-common" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" dependencies = [ - "generic-array 0.14.7", + "generic-array", "rand_core 0.6.4", "typenum", ] @@ -1992,7 +2286,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"25fab6889090c8133f3deb8f73ba3c65a7f456f66436fc012a1b1e272b1e103e" dependencies = [ - "generic-array 0.14.7", + "generic-array", "subtle", ] @@ -2016,12 +2310,13 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.5" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" +checksum = "73736a89c4aff73035ba2ed2e565061954da00d4970fc9ac25dcc85a2a20d790" dependencies = [ - "nix 0.29.0", - "windows-sys 0.59.0", + "dispatch2", + "nix 0.30.1", + "windows-sys 0.61.2", ] [[package]] @@ -2048,7 +2343,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2063,12 +2358,22 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.10" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +dependencies = [ + "darling_core 0.20.11", + "darling_macro 0.20.11", +] + +[[package]] +name = "darling" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ - "darling_core 0.20.10", - "darling_macro 0.20.10", + "darling_core 0.21.3", + "darling_macro 0.21.3", ] [[package]] @@ -2087,16 +2392,31 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.10" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.11.1", + "syn 2.0.110", +] + +[[package]] +name = "darling_core" +version = "0.21.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", + "serde", "strsim 0.11.1", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2112,13 +2432,24 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.10" +version = "0.20.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +dependencies = [ + "darling_core 0.20.11", + "quote", + "syn 2.0.110", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ - "darling_core 0.20.10", + "darling_core 0.21.3", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2141,17 +2472,31 @@ dependencies = [ "libc", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "data-encoding" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" [[package]] name = "data-encoding-macro" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9724adfcf41f45bf652b3995837669d73c4d49a1b5ac1ff82905ac7d9b5558" 
+checksum = "47ce6c96ea0102f01122a185683611bd5ac8d99e62bc59dd12e6bda344ee673d" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -2159,12 +2504,12 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4fdb82bd54a12e42fb58a800dcae6b9e13982238ce2296dc3570b92148e1f" +checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976" dependencies = [ "data-encoding", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2192,9 +2537,9 @@ checksum = "b72465f46d518f6015d9cf07f7f3013a95dd6b9c2747c3d65ae0cce43929d14f" [[package]] name = "delay_map" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df941644b671f05f59433e481ba0d31ac10e3667de725236a4c0d587c496fba1" +checksum = "88e365f083a5cb5972d50ce8b1b2c9f125dc5ec0f50c0248cfb568ae59efcf0b" dependencies = [ "futures", "tokio", @@ -2205,10 +2550,13 @@ dependencies = [ name = "deposit_contract" version = "0.2.0" dependencies = [ - "ethabi 16.0.0", + "alloy-dyn-abi", + "alloy-json-abi", + "alloy-primitives", + "bls", "ethereum_ssz", "hex", - "reqwest 0.11.27", + "reqwest", "serde_json", "sha2 0.9.9", "tree_hash", @@ -2217,22 +2565,11 @@ dependencies = [ [[package]] name = "der" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" -dependencies = [ - "const-oid", - "zeroize", -] - -[[package]] -name = "der" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", - "pem-rfc7468", "zeroize", ] @@ -2252,11 +2589,12 @@ dependencies = [ [[package]] name = "deranged" 
-version = "0.3.11" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" dependencies = [ "powerfmt", + "serde_core", ] [[package]] @@ -2272,35 +2610,26 @@ dependencies = [ [[package]] name = "derive_arbitrary" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" +checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "derive_more" -version = "0.99.19" +version = "0.99.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da29a38df43d6f156149c9b43ded5e018ddff2a855cf2cfd62e8cd7d079c69f" +checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" dependencies = [ - "convert_case 0.4.0", + "convert_case", "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.100", -] - -[[package]] -name = "derive_more" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" -dependencies = [ - "derive_more-impl 1.0.0", + "syn 2.0.110", ] [[package]] @@ -2309,18 +2638,7 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" dependencies = [ - "derive_more-impl 2.0.1", -] - -[[package]] -name = "derive_more-impl" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", + "derive_more-impl", ] [[package]] @@ -2331,26 +2649,17 
@@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "unicode-xid", ] -[[package]] -name = "digest" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" -dependencies = [ - "generic-array 0.12.4", -] - [[package]] name = "digest" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.7", + "generic-array", ] [[package]] @@ -2396,9 +2705,9 @@ dependencies = [ [[package]] name = "discv5" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a20b702c8491b3325866a4935d0b5101e49144d74540384243b6293794aad6fa" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" dependencies = [ "aes 0.8.4", "aes-gcm", @@ -2414,19 +2723,31 @@ dependencies = [ "hkdf", "lazy_static", "libp2p-identity", - "lru", + "lru 0.12.5", "more-asserts", "multiaddr", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", "uint 0.10.0", "zeroize", ] +[[package]] +name = "dispatch2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec" +dependencies = [ + "bitflags 2.10.0", + "block2", + "libc", + "objc2", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -2435,7 +2756,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2443,11 +2764,12 @@ name = "doppelganger_service" version = "0.1.0" dependencies = [ "beacon_node_fallback", + "bls", "environment", "eth2", 
"futures", "logging", - "parking_lot 0.12.3", + "parking_lot", "slot_clock", "task_executor", "tokio", @@ -2469,16 +2791,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" [[package]] -name = "ecdsa" -version = "0.14.8" +name = "dunce" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "dyn-clone" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "ecdsa" @@ -2486,12 +2808,13 @@ version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.9", + "der", "digest 0.10.7", - "elliptic-curve 0.13.8", - "rfc6979 0.4.0", - "signature 2.2.0", - "spki 0.7.3", + "elliptic-curve", + "rfc6979", + "serdect", + "signature", + "spki", ] [[package]] @@ -2500,21 +2823,21 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8 0.10.2", - "signature 2.2.0", + "pkcs8", + "signature", ] [[package]] name = "ed25519-dalek" -version = "2.1.1" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +checksum = "70e796c081cee67dc755e1a36a0a172b897fab85fc3f6bc48307991f64e4eca9" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", - "sha2 0.10.8", + "sha2 0.10.9", 
"subtle", "zeroize", ] @@ -2528,7 +2851,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -2539,9 +2862,8 @@ dependencies = [ "beacon_chain", "bls", "compare_fields", - "compare_fields_derive", "context_deserialize", - "derivative", + "educe", "eth2_network_config", "ethereum_ssz", "ethereum_ssz_derive", @@ -2551,16 +2873,19 @@ dependencies = [ "hex", "kzg", "logging", + "milhouse", "rayon", "serde", "serde_json", "serde_repr", "serde_yaml", "snap", + "ssz_types", "state_processing", "swap_or_not_shuffle", "tree_hash", "tree_hash_derive", + "typenum", "types", ] @@ -2580,7 +2905,7 @@ dependencies = [ "itertools 0.14.0", "serde", "serde_json", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] @@ -2588,7 +2913,9 @@ name = "eip_3076" version = "0.1.0" dependencies = [ "arbitrary", + "bls", "ethereum_serde_utils", + "fixed_bytes", "serde", "serde_json", "tempfile", @@ -2600,6 +2927,9 @@ name = "either" version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" +dependencies = [ + "serde", +] [[package]] name = "ekzg-bls12-381" @@ -2609,8 +2939,8 @@ checksum = "05c599a59deba6188afd9f783507e4d89efc997f0fa340a758f0d0992b322416" dependencies = [ "blst", "blstrs", - "ff 0.13.1", - "group 0.13.0", + "ff", + "group", "pairing", "subtle", ] @@ -2640,7 +2970,7 @@ dependencies = [ "ekzg-bls12-381", "ekzg-maybe-rayon", "ekzg-polynomial", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] @@ -2687,42 +3017,22 @@ dependencies = [ "serde_json", ] -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff 0.12.1", - "generic-array 0.14.7", - "group 0.12.1", - "pkcs8 0.9.0", - 
"rand_core 0.6.4", - "sec1 0.3.0", - "subtle", - "zeroize", -] - [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.5", + "base16ct", + "crypto-bigint", "digest 0.10.7", - "ff 0.13.1", - "generic-array 0.14.7", - "group 0.13.0", - "pem-rfc7468", - "pkcs8 0.10.2", + "ff", + "generic-array", + "group", + "pkcs8", "rand_core 0.6.4", - "sec1 0.7.3", + "sec1", + "serdect", "subtle", "zeroize", ] @@ -2747,11 +3057,11 @@ dependencies = [ "bytes", "ed25519-dalek", "hex", - "k256 0.13.4", + "k256", "log", "rand 0.8.5", "serde", - "sha3 0.10.8", + "sha3", "zeroize", ] @@ -2761,40 +3071,30 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1e6a265c649f3f5979b601d26f1d05ada116434c87741c9493cb56218f76cbc" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "enum-ordinalize" -version = "4.3.0" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea0dcfa4e54eeb516fe454635a95753ddd39acda650ce703031c6973e315dd5" +checksum = "4a1091a7bb1f8f2c4b28f1fe2cef4980ca2d410a3d727d67ecc3178c9b0800f0" dependencies = [ "enum-ordinalize-derive", ] [[package]] name = "enum-ordinalize-derive" -version = "4.3.1" +version = "4.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d28318a75d4aead5c4db25382e8ef717932d0346600cacae6357eb5941bc5ff" +checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "env_logger" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" -dependencies = [ - "log", - 
"regex", + "syn 2.0.110", ] [[package]] @@ -2827,62 +3127,39 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.10" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", -] - -[[package]] -name = "eth-keystore" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fda3bf123be441da5260717e0661c25a2fd9cb2b2c1d20bf2e05580047158ab" -dependencies = [ - "aes 0.8.4", - "ctr 0.9.2", - "digest 0.10.7", - "hex", - "hmac 0.12.1", - "pbkdf2 0.11.0", - "rand 0.8.5", - "scrypt 0.10.0", - "serde", - "serde_json", - "sha2 0.10.8", - "sha3 0.10.8", - "thiserror 1.0.69", - "uuid 0.8.2", + "windows-sys 0.61.2", ] [[package]] name = "eth2" version = "0.1.0" dependencies = [ - "derivative", + "bls", + "context_deserialize", + "educe", "eip_3076", - "either", - "enr", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "futures", "futures-util", - "libp2p-identity", "mediatype", - "multiaddr", "pretty_reqwest_error", "proto_array", - "rand 0.9.0", - "reqwest 0.11.27", + "rand 0.9.2", + "reqwest", "reqwest-eventsource", "sensitive_url", "serde", "serde_json", "ssz_types", + "superstruct", "test_random_derive", "tokio", "types", @@ -2932,8 +3209,8 @@ dependencies = [ "hex", "hmac 0.11.0", "pbkdf2 0.8.0", - "rand 0.9.0", - "scrypt 0.7.0", + "rand 0.9.2", + "scrypt", "serde", "serde_json", "serde_repr", @@ -2952,9 +3229,10 @@ dependencies = [ "discv5", "eth2_config", "ethereum_ssz", + "fixed_bytes", "kzg", "pretty_reqwest_error", - "reqwest 0.11.27", + "reqwest", "sensitive_url", "serde_yaml", "sha2 0.9.9", @@ -2973,7 +3251,7 @@ dependencies = [ "eth2_key_derivation", "eth2_keystore", "hex", - "rand 0.9.0", + 
"rand 0.9.2", "serde", "serde_json", "serde_repr", @@ -2984,295 +3262,64 @@ dependencies = [ [[package]] name = "eth2_wallet_manager" -version = "0.1.0" -dependencies = [ - "eth2_wallet", - "lockfile", - "tempfile", -] - -[[package]] -name = "ethabi" -version = "16.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c98847055d934070b90e806e12d3936b787d0a115068981c1d8dfd5dfef5a5" -dependencies = [ - "ethereum-types 0.12.1", - "hex", - "serde", - "serde_json", - "sha3 0.9.1", - "thiserror 1.0.69", - "uint 0.9.5", -] - -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types 0.14.1", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3 0.10.8", - "thiserror 1.0.69", - "uint 0.9.5", -] - -[[package]] -name = "ethbloom" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb684ac8fa8f6c5759f788862bb22ec6fe3cb392f6bfd08e3c64b603661e3f8" -dependencies = [ - "crunchy", - "fixed-hash 0.7.0", - "impl-rlp", - "impl-serde 0.3.2", - "tiny-keccak", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash 0.8.0", - "impl-codec 0.6.0", - "impl-rlp", - "impl-serde 0.4.0", - "scale-info", - "tiny-keccak", -] - -[[package]] -name = "ethereum-types" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05136f7057fe789f06e6d41d07b34e6f70d8c86e5693b60f97aaa6553553bdaf" -dependencies = [ - "ethbloom 0.11.1", - "fixed-hash 0.7.0", - "impl-rlp", - "impl-serde 0.3.2", - "primitive-types 0.10.1", - "uint 0.9.5", -] - -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom 0.13.0", - "fixed-hash 0.8.0", - "impl-codec 0.6.0", - "impl-rlp", - "impl-serde 0.4.0", - "primitive-types 0.12.2", - "scale-info", - "uint 0.9.5", -] - -[[package]] -name = "ethereum_hashing" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c853bd72c9e5787f8aafc3df2907c2ed03cff3150c3acd94e2e53a98ab70a8ab" -dependencies = [ - "cpufeatures", - "ring", - "sha2 0.10.8", -] - -[[package]] -name = "ethereum_serde_utils" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dc1355dbb41fbbd34ec28d4fb2a57d9a70c67ac3c19f6a5ca4d4a176b9e997a" -dependencies = [ - "alloy-primitives", - "hex", - "serde", - "serde_derive", - "serde_json", -] - -[[package]] -name = "ethereum_ssz" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca8ba45b63c389c6e115b095ca16381534fdcc03cf58176a3f8554db2dbe19b" -dependencies = [ - "alloy-primitives", - "arbitrary", - "ethereum_serde_utils", - "itertools 0.13.0", - "serde", - "serde_derive", - "smallvec", - "typenum", -] - -[[package]] -name = "ethereum_ssz_derive" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd55d08012b4e0dfcc92b8d6081234df65f2986ad34cc76eeed69c5e2ce7506" -dependencies = [ - "darling 0.20.10", - "proc-macro2", - "quote", - "syn 2.0.100", -] - -[[package]] -name = "ethers-contract" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c3c3e119a89f0a9a1e539e7faecea815f74ddcf7c90d0b00d1f524db2fdc9c" -dependencies = [ - "ethers-core", - "ethers-providers", - "futures-util", - "hex", - "once_cell", - "pin-project", - "serde", - "serde_json", - "thiserror 1.0.69", -] - -[[package]] -name = "ethers-core" -version = "1.0.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" -dependencies = [ - "arrayvec", - "bytes", - "chrono", - "convert_case 0.6.0", - "elliptic-curve 0.12.3", - "ethabi 18.0.0", - "generic-array 0.14.7", - "hex", - "k256 0.11.6", - "open-fastrlp", - "proc-macro2", - "rand 0.8.5", - "rlp", - "rlp-derive", - "serde", - "serde_json", - "strum", - "syn 1.0.109", - "thiserror 1.0.69", - "tiny-keccak", - "unicode-xid", +version = "0.1.0" +dependencies = [ + "eth2_wallet", + "lockfile", + "tempfile", ] [[package]] -name = "ethers-etherscan" -version = "1.0.2" +name = "ethereum_hashing" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216" +checksum = "5aa93f58bb1eb3d1e556e4f408ef1dac130bad01ac37db4e7ade45de40d1c86a" dependencies = [ - "ethers-core", - "getrandom 0.2.15", - "reqwest 0.11.27", - "semver 1.0.26", - "serde", - "serde-aux", - "serde_json", - "thiserror 1.0.69", - "tracing", + "cpufeatures", + "ring", + "sha2 0.10.9", ] [[package]] -name = "ethers-middleware" -version = "1.0.2" +name = "ethereum_serde_utils" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e71df7391b0a9a51208ffb5c7f2d068900e99d6b3128d3a4849d138f194778b7" +checksum = "3dc1355dbb41fbbd34ec28d4fb2a57d9a70c67ac3c19f6a5ca4d4a176b9e997a" dependencies = [ - "async-trait", - "auto_impl 0.5.0", - "ethers-contract", - "ethers-core", - "ethers-etherscan", - "ethers-providers", - "ethers-signers", - "futures-locks", - "futures-util", - "instant", - "reqwest 0.11.27", + "alloy-primitives", + "hex", "serde", + "serde_derive", "serde_json", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-futures", - "url", ] [[package]] -name = "ethers-providers" -version = "1.0.2" +name = "ethereum_ssz" +version = "0.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a9e0597aa6b2fdc810ff58bc95e4eeaa2c219b3e615ed025106ecb027407d8" +checksum = "7e8cd8c4f47dfb947dbfe3cdf2945ae1da808dbedc592668658e827a12659ba1" dependencies = [ - "async-trait", - "auto_impl 1.2.1", - "base64 0.13.1", - "ethers-core", - "futures-core", - "futures-timer", - "futures-util", - "getrandom 0.2.15", - "hashers", - "hex", - "http 0.2.12", - "once_cell", - "parking_lot 0.11.2", - "pin-project", - "reqwest 0.11.27", + "alloy-primitives", + "arbitrary", + "context_deserialize", + "ethereum_serde_utils", + "itertools 0.13.0", "serde", - "serde_json", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-futures", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "wasm-timer", - "web-sys", - "ws_stream_wasm", + "serde_derive", + "smallvec", + "typenum", ] [[package]] -name = "ethers-signers" -version = "1.0.2" +name = "ethereum_ssz_derive" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f41ced186867f64773db2e55ffdd92959e094072a1d09a5e5e831d443204f98" +checksum = "78d247bc40823c365a62e572441a8f8b12df03f171713f06bc76180fcd56ab71" dependencies = [ - "async-trait", - "coins-bip32", - "coins-bip39", - "elliptic-curve 0.12.3", - "eth-keystore", - "ethers-core", - "hex", - "rand 0.8.5", - "sha2 0.10.8", - "thiserror 1.0.69", + "darling 0.20.11", + "proc-macro2", + "quote", + "syn 2.0.110", ] [[package]] @@ -3283,9 +3330,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.4.0" +version = "5.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3492acde4c3fc54c845eaab3eed8bd00c7a7d881f78bfc801e43a93dec1331ae" +checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab" dependencies = [ "concurrent-queue", "parking", @@ -3294,11 +3341,11 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = 
"0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ - "event-listener 5.4.0", + "event-listener 5.4.1", "pin-project-lite", ] @@ -3317,24 +3364,28 @@ dependencies = [ name = "execution_engine_integration" version = "0.1.0" dependencies = [ + "alloy-network", + "alloy-primitives", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-signer-local", "async-channel 1.9.0", + "bls", "deposit_contract", - "ethers-core", - "ethers-middleware", - "ethers-providers", - "ethers-signers", "execution_layer", + "fixed_bytes", "fork_choice", "futures", "hex", "logging", "network_utils", - "reqwest 0.11.27", + "reqwest", "sensitive_url", "serde_json", "task_executor", "tempfile", "tokio", + "typenum", "types", ] @@ -3345,13 +3396,14 @@ dependencies = [ "alloy-consensus", "alloy-primitives", "alloy-rlp", + "alloy-rpc-types-eth", "arc-swap", + "bls", "builder_client", "bytes", "eth2", "ethereum_serde_utils", "ethereum_ssz", - "ethers-core", "fixed_bytes", "fork_choice", "hash-db", @@ -3362,12 +3414,12 @@ dependencies = [ "kzg", "lighthouse_version", "logging", - "lru", + "lru 0.12.5", "metrics", - "parking_lot 0.12.3", + "parking_lot", "pretty_reqwest_error", - "rand 0.9.0", - "reqwest 0.11.27", + "rand 0.9.2", + "reqwest", "sensitive_url", "serde", "serde_json", @@ -3385,17 +3437,12 @@ dependencies = [ "tree_hash", "tree_hash_derive", "triehash", + "typenum", "types", "warp", "zeroize", ] -[[package]] -name = "fake-simd" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -3421,7 +3468,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" dependencies = [ "arrayvec", - "auto_impl 1.2.1", + "auto_impl", "bytes", ] @@ -3432,7 +3479,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce8dba4714ef14b8274c371879b175aa55b16b30f269663f19d576f380018dc4" dependencies = [ "arrayvec", - "auto_impl 1.2.1", + "auto_impl", "bytes", ] @@ -3446,23 +3493,13 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "ff" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0b50bfb653653f9ca9095b427bed08ab8d75a137839d9ad64eb11810d5b6393" dependencies = [ - "bitvec 1.0.1", + "bitvec", "rand_core 0.6.4", "subtle", ] @@ -3498,16 +3535,10 @@ dependencies = [ ] [[package]] -name = "fixed-hash" -version = "0.7.0" +name = "find-msvc-tools" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" -dependencies = [ - "byteorder", - "rand 0.8.5", - "rustc-hex", - "static_assertions", -] +checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844" [[package]] name = "fixed-hash" @@ -3531,9 +3562,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "bfe33edd8e85a12a67454e37f8c75e730830d83e313556ab9ebf9ee7fbeb3bfb" dependencies = [ "crc32fast", "libz-sys", @@ -3548,9 +3579,15 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foldhash" -version = "0.1.4" +version = "0.1.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" [[package]] name = "foreign-types" @@ -3574,6 +3611,7 @@ dependencies = [ "beacon_chain", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "logging", "metrics", "proto_array", @@ -3587,9 +3625,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -3610,12 +3648,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "funty" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" - [[package]] name = "funty" version = "2.0.0" @@ -3683,24 +3715,14 @@ checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" -version = "2.6.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5edaec856126859abb19ed65f39e90fea3a9574b9707f13539acf4abf7eb532" +checksum = "f78e10609fe0e0b3f4157ffab1876319b5b0db102a2c60dc4626306dc46b44ad" dependencies = [ "futures-core", "pin-project-lite", ] -[[package]] -name = "futures-locks" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ec6fe3675af967e67c5536c0b9d44e34e6c52f86bedc4ea49c5317b8e94d06" -dependencies = [ - "futures-channel", - "futures-task", -] - [[package]] name = 
"futures-macro" version = "0.3.31" @@ -3709,7 +3731,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -3719,7 +3741,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f2f12607f92c69b12ed746fabf9ca4f5c482cba46679c1a75b874ed7c26adb" dependencies = [ "futures-io", - "rustls 0.23.23", + "rustls 0.23.35", "rustls-pki-types", ] @@ -3760,35 +3782,10 @@ dependencies = [ ] [[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "generator" -version = "0.8.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" -dependencies = [ - "cfg-if", - "libc", - "log", - "rustversion", - "windows 0.58.0", -] - -[[package]] -name = "generic-array" -version = "0.12.4" +name = "futures-utils-wasm" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" -dependencies = [ - "typenum", -] +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" [[package]] name = "generic-array" @@ -3805,6 +3802,7 @@ dependencies = [ name = "genesis" version = "0.2.0" dependencies = [ + "bls", "ethereum_hashing", "ethereum_ssz", "int_to_bytes", @@ -3818,27 +3816,29 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 
0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasip2", + "wasm-bindgen", ] [[package]] @@ -3847,41 +3847,15 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ - "opaque-debug 0.3.1", + "opaque-debug", "polyval", ] -[[package]] -name = "gimli" -version = "0.31.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" - -[[package]] -name = "git-version" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" -dependencies = [ - "git-version-macro", -] - -[[package]] -name = "git-version-macro" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "graffiti_file" @@ -3895,24 +3869,13 @@ dependencies = [ "types", ] -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.1", + "ff", "rand 0.8.5", "rand_core 0.6.4", "rand_xorshift 0.3.0", @@ -3921,9 +3884,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" dependencies = [ "bytes", "fnv", @@ -3931,7 +3894,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.8.0", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -3940,17 +3903,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.8" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "http 1.3.0", - "indexmap 2.8.0", + "http 1.3.1", + "indexmap 2.12.0", "slab", "tokio", "tokio-util", @@ -3959,12 +3922,13 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", + "zerocopy", ] [[package]] @@ -4000,23 +3964,23 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", - "foldhash", - "serde", + "foldhash 0.1.5", ] [[package]] -name = "hashers" -version = "1.0.1" +name = "hashbrown" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" dependencies = [ - "fxhash", + "foldhash 0.2.0", + "serde", ] [[package]] @@ -4043,7 +4007,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.5", ] [[package]] @@ -4093,12 +4057,6 @@ dependencies = [ "psutil", ] -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - [[package]] name = "heck" version = "0.5.0" @@ -4113,23 +4071,23 @@ checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" - -[[package]] -name = "hermit-abi" -version = "0.5.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbd780fe5cc30f81464441920d82ac8740e2e46b29a6fad543ddd075229ce37e" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hex-conservative" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5313b072ce3c597065a808dbf612c4c8e8590bdbf8b579508bf7a762c5eae6cd" dependencies = [ - "serde", + "arrayvec", ] [[package]] @@ -4154,10 +4112,10 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand 0.9.0", + "rand 0.9.2", "ring", - "socket2", - "thiserror 2.0.12", + "socket2 0.5.10", + "thiserror 2.0.17", "tinyvec", "tokio", "tracing", @@ -4176,11 +4134,11 @@ dependencies = [ "ipconfig", "moka", "once_cell", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", "resolv-conf", "smallvec", - "thiserror 2.0.12", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -4215,22 +4173,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d" dependencies = [ - "windows-sys 0.59.0", -] - -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", + "windows-sys 0.61.2", ] [[package]] @@ -4246,9 +4193,9 @@ dependencies = [ [[package]] name = "http" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a761d192fbf18bdef69f5ceedd0d1333afcbda0ee23840373b8317570d23c65" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -4273,7 +4220,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ 
"bytes", - "http 1.3.0", + "http 1.3.1", ] [[package]] @@ -4284,7 +4231,7 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "pin-project-lite", ] @@ -4295,14 +4242,17 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_processor", + "bls", "bs58 0.4.0", "bytes", + "context_deserialize", "directory", "either", "eth2", "ethereum_serde_utils", "ethereum_ssz", "execution_layer", + "fixed_bytes", "futures", "genesis", "health_metrics", @@ -4311,14 +4261,14 @@ dependencies = [ "lighthouse_tracing", "lighthouse_version", "logging", - "lru", + "lru 0.12.5", "metrics", "network", "network_utils", "operation_pool", - "parking_lot 0.12.3", + "parking_lot", "proto_array", - "rand 0.9.0", + "rand 0.9.2", "safe_arith", "sensitive_url", "serde", @@ -4350,7 +4300,7 @@ dependencies = [ "malloc_utils", "metrics", "network_utils", - "reqwest 0.11.27", + "reqwest", "serde", "slot_clock", "store", @@ -4375,9 +4325,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" @@ -4389,14 +4339,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -4405,20 +4355,22 @@ dependencies = [ [[package]] name = "hyper" -version = "1.6.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = 
"2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", - "h2 0.4.8", - "http 1.3.0", + "futures-core", + "h2 0.4.12", + "http 1.3.1", "http-body 1.0.1", "httparse", "httpdate", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -4426,16 +4378,19 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.24.2" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.32", - "rustls 0.21.12", + "http 1.3.1", + "hyper 1.8.1", + "hyper-util", + "rustls 0.23.35", + "rustls-pki-types", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls 0.26.4", + "tower-service", + "webpki-roots", ] [[package]] @@ -4444,7 +4399,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.6.0", + "hyper 1.8.1", "hyper-util", "pin-project-lite", "tokio", @@ -4453,31 +4408,39 @@ dependencies = [ [[package]] name = "hyper-tls" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", - "hyper 0.14.32", + "http-body-util", + "hyper 1.8.1", + "hyper-util", "native-tls", "tokio", "tokio-native-tls", + "tower-service", ] [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = 
"52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", + "futures-core", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", - "hyper 1.6.0", + "hyper 1.8.1", + "ipnet", + "libc", + "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.6.1", "tokio", "tower-service", "tracing", @@ -4485,16 +4448,17 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.61" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", - "windows-core 0.52.0", + "windows-core 0.62.2", ] [[package]] @@ -4508,122 +4472,85 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" -dependencies = [ - "displaydoc", - "litemap", - "tinystr", - "writeable", - "zerovec", -] - -[[package]] -name = "icu_locid_transform" -version = "1.5.0" +name = "icu_locale_core" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ "displaydoc", - 
"icu_locid", - "icu_locid_transform_data", - "icu_provider", + "litemap", "tinystr", + "writeable", "zerovec", ] -[[package]] -name = "icu_locid_transform_data" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" dependencies = [ - "displaydoc", "icu_collections", "icu_normalizer_data", "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99" dependencies = [ - "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", - "stable_deref_trait", - "tinystr", + "icu_locale_core", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "ident_case" version = "1.0.1" @@ -4632,9 +4559,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -4643,9 +4570,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -4679,9 +4606,9 @@ dependencies = [ "netlink-proto", "netlink-sys", "rtnetlink", - "system-configuration 0.6.1", + "system-configuration", "tokio", - "windows 0.53.0", + "windows", ] [[package]] @@ -4694,60 +4621,24 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 1.3.0", + "http 1.3.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", "hyper-util", "log", - "rand 0.9.0", + "rand 0.9.2", "tokio", "url", "xmltree", ] -[[package]] 
-name = "impl-codec" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161ebdfec3c8e3b52bf61c4f3550a1eea4f9579d10dc1b936f3171ebdcd6c443" -dependencies = [ - "parity-scale-codec 2.3.1", -] - [[package]] name = "impl-codec" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.7.4", -] - -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - -[[package]] -name = "impl-serde" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4551f042f3438e64dbd6226b20527fc84a6e1fe65688b58746a2f53623f25f5c" -dependencies = [ - "serde", -] - -[[package]] -name = "impl-serde" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" -dependencies = [ - "serde", + "parity-scale-codec", ] [[package]] @@ -4758,7 +4649,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -4769,18 +4660,20 @@ checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", + "serde", ] [[package]] name = "indexmap" -version = "2.8.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" +checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" dependencies = [ "arbitrary", "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.16.0", "serde", + "serde_core", ] [[package]] @@ -4794,9 +4687,9 
@@ dependencies = [ "filesystem", "lockfile", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", - "reqwest 0.11.27", + "parking_lot", + "rand 0.9.2", + "reqwest", "serde", "serde_json", "signing_method", @@ -4815,19 +4708,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ - "generic-array 0.14.7", -] - -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", + "generic-array", ] [[package]] @@ -4865,8 +4746,8 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2", - "widestring 1.1.0", + "socket2 0.5.10", + "widestring 1.2.1", "windows-sys 0.48.0", "winreg", ] @@ -4877,22 +4758,32 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "iri-string" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397" +dependencies = [ + "memchr", + "serde", +] + [[package]] name = "is-terminal" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ - "hermit-abi 0.5.0", + "hermit-abi 0.5.2", "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" @@ -4938,18 +4829,19 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" dependencies = [ "once_cell", "wasm-bindgen", @@ -4970,19 +4862,6 @@ dependencies = [ "simple_asn1", ] -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if", - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.8", - "sha3 0.10.8", -] - [[package]] name = "k256" version = "0.13.4" @@ -4990,11 +4869,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", + "ecdsa", + "elliptic-curve", "once_cell", - "sha2 0.10.8", - "signature 2.2.0", + "serdect", + "sha2 0.10.9", + "signature", ] [[package]] @@ -5022,7 +4902,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b286e6b663fb926e1eeb68528e69cb70ed46c6d65871a21b2215ae8154c6d3c" 
dependencies = [ - "primitive-types 0.12.2", + "primitive-types", "tiny-keccak", ] @@ -5033,7 +4913,7 @@ dependencies = [ "arbitrary", "c-kzg", "criterion", - "derivative", + "educe", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", @@ -5079,6 +4959,7 @@ dependencies = [ "ethereum_hashing", "ethereum_ssz", "execution_layer", + "fixed_bytes", "hex", "lighthouse_network", "lighthouse_version", @@ -5124,25 +5005,25 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.175" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] name = "libloading" -version = "0.8.6" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-link", ] [[package]] name = "libm" -version = "0.2.11" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8355be11b20d696c8f18f6cc018c4e372165b1fa8126cef092399c9951984ffa" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" [[package]] name = "libmdbx" @@ -5151,11 +5032,11 @@ source = "git+https://github.com/sigp/libmdbx-rs?rev=e6ff4b9377c1619bcf0bfdf52be dependencies = [ "bitflags 1.3.2", "byteorder", - "derive_more 0.99.19", + "derive_more 0.99.20", "indexmap 1.9.3", "libc", "mdbx-sys", - "parking_lot 0.12.3", + "parking_lot", "thiserror 1.0.69", ] @@ -5169,7 +5050,7 @@ dependencies = [ "either", "futures", "futures-timer", - "getrandom 0.2.15", + "getrandom 0.2.16", "libp2p-allow-block-list", "libp2p-connection-limits", "libp2p-core", @@ -5188,7 +5069,7 @@ dependencies = [ "multiaddr", "pin-project", 
"rw-stream-sink", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -5227,12 +5108,12 @@ dependencies = [ "multiaddr", "multihash", "multistream-select", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "quick-protobuf", "rand 0.8.5", "rw-stream-sink", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "unsigned-varint 0.8.0", "web-time", @@ -5249,7 +5130,7 @@ dependencies = [ "hickory-resolver", "libp2p-core", "libp2p-identity", - "parking_lot 0.12.3", + "parking_lot", "smallvec", "tracing", ] @@ -5259,7 +5140,7 @@ name = "libp2p-gossipsub" version = "0.50.0" source = "git+https://github.com/sigp/rust-libp2p.git?rev=5acdf89a65d64098f9346efa5769e57bcd19dea9#5acdf89a65d64098f9346efa5769e57bcd19dea9" dependencies = [ - "async-channel 2.3.1", + "async-channel 2.5.0", "asynchronous-codec", "base64 0.22.1", "byteorder", @@ -5268,7 +5149,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "getrandom 0.2.15", + "getrandom 0.2.16", "hashlink 0.10.0", "hex_fmt", "libp2p-core", @@ -5279,7 +5160,7 @@ dependencies = [ "quick-protobuf-codec", "rand 0.8.5", "regex", - "sha2 0.10.8", + "sha2 0.10.9", "tracing", "web-time", ] @@ -5301,7 +5182,7 @@ dependencies = [ "quick-protobuf", "quick-protobuf-codec", "smallvec", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", ] @@ -5315,14 +5196,12 @@ dependencies = [ "bs58 0.5.1", "ed25519-dalek", "hkdf", - "k256 0.13.4", + "k256", "multihash", - "p256", "quick-protobuf", "rand 0.8.5", - "sec1 0.7.3", - "sha2 0.10.8", - "thiserror 2.0.12", + "sha2 0.10.9", + "thiserror 2.0.17", "tracing", "zeroize", ] @@ -5341,7 +5220,7 @@ dependencies = [ "libp2p-swarm", "rand 0.8.5", "smallvec", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] @@ -5364,9 +5243,9 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.43.0" +version = "0.43.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8aaa6fee3722e355443058472fc4705d78681bc2d8e447a0bdeb3fecf40cd197" +checksum = 
"95a4019ba30c4e42b776113e9778071691fe3f34bf23b6b3bf0dfcf29d801f3d" dependencies = [ "asynchronous-codec", "bytes", @@ -5374,7 +5253,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", "smallvec", "tracing", @@ -5398,7 +5277,7 @@ dependencies = [ "rand 0.8.5", "snow", "static_assertions", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "x25519-dalek", "zeroize", @@ -5435,9 +5314,9 @@ dependencies = [ "quinn", "rand 0.8.5", "ring", - "rustls 0.23.23", - "socket2", - "thiserror 2.0.12", + "rustls 0.23.35", + "socket2 0.5.10", + "thiserror 2.0.17", "tokio", "tracing", ] @@ -5455,7 +5334,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "libp2p-swarm-derive", - "lru", + "lru 0.12.5", "multistream-select", "rand 0.8.5", "smallvec", @@ -5470,9 +5349,9 @@ version = "0.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ - "heck 0.5.0", + "heck", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -5486,7 +5365,7 @@ dependencies = [ "if-watch", "libc", "libp2p-core", - "socket2", + "socket2 0.5.10", "tokio", "tracing", ] @@ -5503,9 +5382,9 @@ dependencies = [ "libp2p-identity", "rcgen", "ring", - "rustls 0.23.23", - "rustls-webpki 0.103.4", - "thiserror 2.0.12", + "rustls 0.23.35", + "rustls-webpki 0.103.8", + "thiserror 2.0.17", "x509-parser", "yasna", ] @@ -5534,19 +5413,19 @@ dependencies = [ "either", "futures", "libp2p-core", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", "yamux 0.12.1", - "yamux 0.13.4", + "yamux 0.13.8", ] [[package]] name = "libredox" -version = "0.1.3" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" dependencies = [ - "bitflags 2.9.0", + 
"bitflags 2.10.0", "libc", ] @@ -5563,9 +5442,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "15d118bbf3771060e7311cc7bb0545b01d08a8b4a7de949198dec1fa0ca1c0f7" dependencies = [ "cc", "pkg-config", @@ -5630,6 +5509,7 @@ dependencies = [ "alloy-primitives", "alloy-rlp", "async-channel 1.9.0", + "bls", "bytes", "delay_map", "directory", @@ -5639,6 +5519,7 @@ dependencies = [ "eth2", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "fnv", "futures", "hex", @@ -5649,15 +5530,14 @@ dependencies = [ "lighthouse_version", "local-ip-address", "logging", - "lru", + "lru 0.12.5", "lru_cache", "metrics", "network_utils", - "parking_lot 0.12.3", + "parking_lot", "prometheus-client", - "quickcheck", - "quickcheck_macros", - "rand 0.9.0", + "proptest", + "rand 0.9.2", "regex", "serde", "sha2 0.9.9", @@ -5672,6 +5552,7 @@ dependencies = [ "tokio-util", "tracing", "tracing-subscriber", + "typenum", "types", "unsigned-varint 0.8.0", ] @@ -5686,6 +5567,7 @@ version = "0.1.0" dependencies = [ "account_utils", "beacon_node_fallback", + "bls", "doppelganger_service", "either", "environment", @@ -5693,7 +5575,7 @@ dependencies = [ "futures", "initialized_validators", "logging", - "parking_lot 0.12.3", + "parking_lot", "serde", "signing_method", "slashing_protection", @@ -5708,9 +5590,8 @@ dependencies = [ [[package]] name = "lighthouse_version" -version = "0.1.0" +version = "8.0.1" dependencies = [ - "git-version", "regex", ] @@ -5728,15 +5609,15 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "linux-raw-sys" -version = "0.9.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9c683daf087dc577b7506e9695b3d556a9f3849903fa28186283afd6809e9" +checksum = 
"df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "lmdb-rkv" @@ -5761,23 +5642,22 @@ dependencies = [ [[package]] name = "local-ip-address" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3669cf5561f8d27e8fc84cc15e58350e70f557d4d65f70e3154e54cd2f8e1782" +checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" dependencies = [ "libc", "neli", - "thiserror 1.0.69", + "thiserror 2.0.17", "windows-sys 0.59.0", ] [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] @@ -5791,9 +5671,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "logging" @@ -5826,27 +5706,29 @@ dependencies = [ ] [[package]] -name = "loom" -version = "0.7.2" +name = "lru" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "cfg-if", - "generator", - "scoped-tls", - "tracing", - "tracing-subscriber", + "hashbrown 0.15.5", ] 
[[package]] name = "lru" -version = "0.12.5" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" +checksum = "227748d55f2f0ab4735d87fd623798cb6b664512fe979705f829c9f81c934465" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.5", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "lru_cache" version = "0.1.0" @@ -5857,20 +5739,31 @@ dependencies = [ [[package]] name = "mach2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" +checksum = "d640282b302c0bb0a2a8e0233ead9035e3bed871f0b7e81fe4a1ec829765db44" dependencies = [ "libc", ] +[[package]] +name = "macro-string" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "malloc_utils" version = "0.1.0" dependencies = [ "libc", "metrics", - "parking_lot 0.12.3", + "parking_lot", "tikv-jemalloc-ctl", "tikv-jemallocator", ] @@ -5882,10 +5775,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" [[package]] -name = "match_cfg" -version = "0.1.0" +name = "match-lookup" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" +checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] [[package]] name = "matchers" @@ -5927,9 +5825,9 @@ 
checksum = "33746aadcb41349ec291e7f2f0a3aa6834d1d7c58066fb4b01f68efc4c4b7631" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "memoffset" @@ -5947,8 +5845,7 @@ dependencies = [ "alloy-primitives", "ethereum_hashing", "fixed_bytes", - "quickcheck", - "quickcheck_macros", + "proptest", "safe_arith", ] @@ -5984,18 +5881,19 @@ dependencies = [ [[package]] name = "milhouse" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bdb104e38d3a8c5ffb7e9d2c43c522e6bcc34070edbadba565e722f0dee56c7" +checksum = "259dd9da2ae5e0278b95da0b7ecef9c18c309d0a2d9e6db57ed33b9e8910c5e7" dependencies = [ "alloy-primitives", "arbitrary", + "context_deserialize", "educe", "ethereum_hashing", "ethereum_ssz", "ethereum_ssz_derive", "itertools 0.13.0", - "parking_lot 0.12.3", + "parking_lot", "rayon", "serde", "smallvec", @@ -6029,22 +5927,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.5" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", + "simd-adler32", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi", + "windows-sys 0.61.2", ] [[package]] @@ 
-6076,7 +5975,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -6088,7 +5987,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -6101,13 +6000,13 @@ dependencies = [ "bytes", "colored", "futures-util", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", "hyper-util", "log", - "rand 0.9.0", + "rand 0.9.2", "regex", "serde_json", "serde_urlencoded", @@ -6117,21 +6016,20 @@ dependencies = [ [[package]] name = "moka" -version = "0.12.10" +version = "0.12.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077" dependencies = [ "crossbeam-channel", "crossbeam-epoch", "crossbeam-utils", - "loom", - "parking_lot 0.12.3", + "equivalent", + "parking_lot", "portable-atomic", "rustc_version 0.4.1", "smallvec", "tagptr", - "thiserror 1.0.69", - "uuid 1.15.1", + "uuid 1.18.1", ] [[package]] @@ -6143,7 +6041,7 @@ dependencies = [ "lighthouse_version", "metrics", "regex", - "reqwest 0.11.27", + "reqwest", "sensitive_url", "serde", "serde_json", @@ -6180,11 +6078,12 @@ dependencies = [ [[package]] name = "multibase" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404" +checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77" dependencies = [ "base-x", + "base256emoji", "data-encoding", "data-encoding-macro", ] @@ -6303,7 +6202,7 @@ dependencies = [ "log", "netlink-packet-core", "netlink-sys", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -6331,18 +6230,19 @@ dependencies = [ "beacon_processor", "bls", "delay_map", - "derivative", + "educe", "eth2", "eth2_network_config", "ethereum_ssz", 
"execution_layer", + "fixed_bytes", "fnv", "futures", "genesis", "hex", "igd-next", "itertools 0.10.5", - "k256 0.13.4", + "k256", "kzg", "libp2p-gossipsub", "lighthouse_network", @@ -6352,9 +6252,9 @@ dependencies = [ "matches", "metrics", "operation_pool", - "parking_lot 0.12.3", + "parking_lot", "rand 0.8.5", - "rand 0.9.0", + "rand 0.9.2", "rand_chacha 0.3.1", "rand_chacha 0.9.0", "serde_json", @@ -6368,6 +6268,7 @@ dependencies = [ "tokio-stream", "tracing", "tracing-subscriber", + "typenum", "types", ] @@ -6381,7 +6282,7 @@ dependencies = [ "lru_cache", "metrics", "multiaddr", - "parking_lot 0.12.3", + "parking_lot", "serde", "tiny-keccak", ] @@ -6410,11 +6311,11 @@ dependencies = [ [[package]] name = "nix" -version = "0.29.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "cfg-if", "cfg_aliases", "libc", @@ -6464,11 +6365,11 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.50.1" +version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" +checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] @@ -6483,11 +6384,10 @@ dependencies = [ [[package]] name = "num-bigint-dig" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" dependencies = [ - "byteorder", "lazy_static", "libm", "num-integer", @@ -6537,33 +6437,64 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" 
+version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.5.2", "libc", ] +[[package]] +name = "num_enum" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1207a7e20ad57b847bbddc6776b968420d38292bbfe2089accff5e19e82454c" +dependencies = [ + "num_enum_derive", + "rustversion", +] + +[[package]] +name = "num_enum_derive" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "nybbles" -version = "0.3.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8983bb634df7248924ee0c4c3a749609b5abcb082c28fffe3254b3eb3602b307" +checksum = "2c4b5ecbd0beec843101bffe848217f770e8b8da81d8355b7d6e226f2199b3dc" dependencies = [ - "const-hex", + "alloy-rlp", + "cfg-if", + "proptest", + "ruint", + "serde", "smallvec", ] [[package]] -name = "object" -version = "0.36.7" +name = "objc2" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "b7c2599ce0ec54857b29ce62166b0ed9b4f6f1a70ccc9a71165b6154caca8c05" dependencies = [ - "memchr", + "objc2-encode", ] +[[package]] +name = "objc2-encode" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef25abbcd74fb2609453eb695bd2f860d389e457f67dc17cafc8b8cbc89d0c33" + [[package]] name = "oid-registry" version = "0.8.1" @@ -6575,19 +6506,25 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.21.0" +version = "1.21.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cde51589ab56b20a6f686b2c68f7a0bd6add753d697abf720d63f8db3ab7b1ad" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" dependencies = [ "critical-section", "portable-atomic", ] +[[package]] +name = "once_cell_polyfill" +version = "1.70.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" + [[package]] name = "oneshot_broadcast" version = "0.1.0" dependencies = [ - "parking_lot 0.12.3", + "parking_lot", ] [[package]] @@ -6596,50 +6533,19 @@ version = "11.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" -[[package]] -name = "opaque-debug" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" - [[package]] name = "opaque-debug" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "open-fastrlp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" -dependencies = [ - "arrayvec", - "auto_impl 1.2.1", - "bytes", - "ethereum-types 0.14.1", - "open-fastrlp-derive", -] - -[[package]] -name = "open-fastrlp-derive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" -dependencies = [ - "bytes", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "openssl" -version = "0.10.72" +version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fedfea7d58a1f73118430a55da6a286e7b044961736ce96a16a17068ea25e5da" +checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "cfg-if", "foreign-types", "libc", @@ -6656,7 +6562,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -6667,18 +6573,18 @@ checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "openssl-src" -version = "300.4.2+3.4.1" +version = "300.5.4+3.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168ce4e058f975fe43e89d9ccf78ca668601887ae736090aacc23ae353c298e2" +checksum = "a507b3792995dae9b0df8a1c1e3771e8418b7c2d9f0baeba32e6fe8b06c7cb72" dependencies = [ "cc", ] [[package]] name = "openssl-sys" -version = "0.9.107" +version = "0.9.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8288979acd84749c744a9014b4382d42b8f7b2592847b5afb2ed29e5d16ede07" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" dependencies = [ "cc", "libc", @@ -6697,7 +6603,7 @@ dependencies = [ "futures-sink", "js-sys", "pin-project-lite", - "thiserror 2.0.12", + "thiserror 2.0.17", "tracing", ] @@ -6709,9 +6615,9 @@ checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" dependencies = [ "async-trait", "bytes", - "http 1.3.0", + "http 1.3.1", "opentelemetry", - "reqwest 0.12.15", + "reqwest", ] [[package]] @@ -6720,14 +6626,14 @@ version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" dependencies = [ - "http 1.3.0", + "http 1.3.1", "opentelemetry", "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", "prost", - "reqwest 0.12.15", - "thiserror 2.0.12", + "reqwest", + "thiserror 2.0.17", "tokio", "tonic 
0.13.1", "tracing", @@ -6756,9 +6662,9 @@ dependencies = [ "futures-util", "opentelemetry", "percent-encoding", - "rand 0.9.0", + "rand 0.9.2", "serde_json", - "thiserror 2.0.12", + "thiserror 2.0.17", ] [[package]] @@ -6766,105 +6672,62 @@ name = "operation_pool" version = "0.2.0" dependencies = [ "beacon_chain", - "bitvec 1.0.1", - "derivative", + "bitvec", + "bls", + "educe", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "itertools 0.10.5", "maplit", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", "rayon", "serde", "state_processing", "store", + "superstruct", "tokio", + "typenum", "types", ] -[[package]] -name = "ordered-float" -version = "2.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" -dependencies = [ - "num-traits", -] - -[[package]] -name = "p256" -version = "0.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" -dependencies = [ - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", - "primeorder", - "sha2 0.10.8", -] - [[package]] name = "pairing" version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" -dependencies = [ - "group 0.13.0", -] - -[[package]] -name = "parity-scale-codec" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" -dependencies = [ - "arrayvec", - "bitvec 0.20.4", - "byte-slice-cast", - "impl-trait-for-tuples", - "parity-scale-codec-derive 2.3.1", - "serde", +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group", ] [[package]] name = "parity-scale-codec" -version = 
"3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fde3d0718baf5bc92f577d652001da0f8d54cd03a7974e118d04fc888dc23d" +checksum = "799781ae679d79a948e13d4824a40970bfa500058d245760dd857301059810fa" dependencies = [ "arrayvec", - "bitvec 1.0.1", + "bitvec", "byte-slice-cast", "const_format", "impl-trait-for-tuples", - "parity-scale-codec-derive 3.7.4", + "parity-scale-codec-derive", "rustversion", "serde", ] [[package]] name = "parity-scale-codec-derive" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" -dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "parity-scale-codec-derive" -version = "3.7.4" +version = "3.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581c837bb6b9541ce7faa9377c20616e4fb7650f6b0f68bc93c827ee504fb7b3" +checksum = "34b4653168b563151153c9e4c08ebed57fb8262bebfa79711552fa983c623e7a" dependencies = [ - "proc-macro-crate 3.3.0", + "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -6875,50 +6738,25 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" -dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", -] - -[[package]] -name = "parking_lot" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", -] - -[[package]] -name = "parking_lot_core" 
-version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" -dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.10", + "redox_syscall", "smallvec", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -6956,55 +6794,35 @@ dependencies = [ "digest 0.10.7", "hmac 0.12.1", "password-hash", - "sha2 0.10.8", + "sha2 0.10.9", ] [[package]] name = "pem" -version = "3.0.5" +version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" +checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ "base64 0.22.1", - "serde", -] - -[[package]] -name = "pem-rfc7468" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" -dependencies = [ - "base64ct", + "serde_core", ] [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "pest" -version = "2.7.15" +version = "2.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7cafe60d6cf8e62e1b9b2ea516a089c008945bb5a275416789e7db0bc199dc" +checksum = 
"989e7521a040efde50c3ab6bbadafbe15ab6dc042686926be59ac35d74607df4" dependencies = [ "memchr", - "thiserror 2.0.12", "ucd-trie", ] -[[package]] -name = "pharos" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" -dependencies = [ - "futures", - "rustc_version 0.4.1", -] - [[package]] name = "pin-project" version = "1.1.10" @@ -7022,7 +6840,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -7037,24 +6855,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.9", - "spki 0.7.3", + "der", + "spki", ] [[package]] @@ -7099,17 +6907,16 @@ dependencies = [ [[package]] name = "polling" -version = "3.7.4" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a604568c3202727d1507653cb121dbd627a58684eb09a820fd746bee38b4442f" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.4.0", + "hermit-abi 0.5.2", "pin-project-lite", - "rustix 0.38.44", - "tracing", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.61.2", ] [[package]] @@ -7119,7 +6926,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", - "opaque-debug 0.3.1", + "opaque-debug", "universal-hash", ] @@ -7131,15 +6938,24 @@ checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", - "opaque-debug 0.3.1", + "opaque-debug", "universal-hash", ] [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + +[[package]] +name = "potential_utf" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] [[package]] name = "powerfmt" @@ -7153,7 +6969,7 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.23", + "zerocopy", ] [[package]] @@ -7186,40 +7002,18 @@ dependencies = [ name = "pretty_reqwest_error" version = "0.1.0" dependencies = [ - "reqwest 0.11.27", + "reqwest", "sensitive_url", ] [[package]] name = "prettyplease" -version = "0.2.30" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1ccf34da56fc294e7d4ccf69a85992b7dfb826b7cf57bac6a70bba3494cc08a" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.100", -] - -[[package]] -name = "primeorder" -version = "0.13.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" -dependencies = [ - "elliptic-curve 0.13.8", -] - -[[package]] -name = "primitive-types" -version = 
"0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05e4722c697a58a99d5d06a08c30821d7c082a4632198de1eaa5a6c22ef42373" -dependencies = [ - "fixed-hash 0.7.0", - "impl-codec 0.5.1", - "impl-rlp", - "impl-serde 0.3.2", - "uint 0.9.5", + "syn 2.0.110", ] [[package]] @@ -7228,62 +7022,47 @@ version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ - "fixed-hash 0.8.0", - "impl-codec 0.6.0", - "impl-rlp", - "impl-serde 0.4.0", - "scale-info", + "fixed-hash", + "impl-codec", "uint 0.9.5", ] [[package]] name = "proc-macro-crate" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" -dependencies = [ - "once_cell", - "toml_edit 0.19.15", -] - -[[package]] -name = "proc-macro-crate" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.22.24", + "toml_edit", ] [[package]] -name = "proc-macro-error" -version = "1.0.4" +name = "proc-macro-error-attr2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" dependencies = [ - "proc-macro-error-attr", "proc-macro2", "quote", - "syn 1.0.109", - "version_check", ] [[package]] -name = "proc-macro-error-attr" -version = "1.0.4" +name = "proc-macro-error2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +checksum = 
"11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" dependencies = [ + "proc-macro-error-attr2", "proc-macro2", "quote", - "version_check", + "syn 2.0.110", ] [[package]] name = "proc-macro2" -version = "1.0.94" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] @@ -7313,7 +7092,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot 0.12.3", + "parking_lot", "thiserror 1.0.69", ] @@ -7325,7 +7104,7 @@ checksum = "cf41c1a7c32ed72abe5082fb19505b969095c12da9f5732a4bc9878757fd087c" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.3", + "parking_lot", "prometheus-client-derive-encode", ] @@ -7337,23 +7116,22 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "proptest" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14cae93065090804185d3b75f0bf93b8eeda30c7a9b4a33d3bdb3988d6229e50" +checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.9.0", - "lazy_static", + "bitflags 2.10.0", "num-traits", - "rand 0.8.5", - "rand_chacha 0.3.1", - "rand_xorshift 0.3.0", + "rand 0.9.2", + "rand_chacha 0.9.0", + "rand_xorshift 0.4.0", "regex-syntax", "rusty-fork", "tempfile", @@ -7362,13 +7140,13 @@ dependencies = [ [[package]] name = "proptest-derive" -version = "0.5.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee1c9ac207483d5e7db4940700de86a9aae46ef90c48b57f99fe7edb8345e49" +checksum = "095a99f75c69734802359b682be8daaf8980296731f6470434ea2c652af1dd30" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", 
+ "syn 2.0.110", ] [[package]] @@ -7391,7 +7169,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -7409,6 +7187,7 @@ version = "0.2.0" dependencies = [ "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "safe_arith", "serde", "serde_yaml", @@ -7424,7 +7203,7 @@ checksum = "5e617cc9058daa5e1fe5a0d23ed745773a5ee354111dad1ec0235b0cc16b6730" dependencies = [ "cfg-if", "darwin-libproc", - "derive_more 0.99.19", + "derive_more 0.99.20", "glob", "mach2", "nix 0.24.3", @@ -7462,62 +7241,43 @@ dependencies = [ "unsigned-varint 0.8.0", ] -[[package]] -name = "quickcheck" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" -dependencies = [ - "env_logger", - "log", - "rand 0.8.5", -] - -[[package]] -name = "quickcheck_macros" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b22a693222d716a9587786f37ac3f6b4faedb5b80c23914e7303ff5a1d8016e9" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", + "cfg_aliases", "futures-io", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", - "rustls 0.23.23", - "socket2", - "thiserror 2.0.12", + "rustls 0.23.35", + "socket2 0.6.1", + "thiserror 2.0.17", "tokio", "tracing", + "web-time", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = 
"f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" dependencies = [ "bytes", - "getrandom 0.2.15", - "rand 0.8.5", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", "ring", "rustc-hash 2.1.1", - "rustls 0.23.23", + "rustls 0.23.35", "rustls-pki-types", "slab", - "thiserror 2.0.12", + "thiserror 2.0.17", "tinyvec", "tracing", "web-time", @@ -7525,27 +7285,33 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2", + "socket2 0.6.1", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "quote" -version = "1.0.39" +version = "1.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1f1914ce909e1658d9907913b4b91947430c7d9be598b15a1912935b8c04801" +checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "r2d2" version = "0.8.10" @@ -7553,7 +7319,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" dependencies = [ "log", - "parking_lot 0.12.3", + "parking_lot", "scheduled-thread-pool", ] @@ -7567,18 +7333,6 @@ dependencies = [ "rusqlite", ] -[[package]] -name = "radium" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" - -[[package]] -name = "radium" -version = "0.6.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" - [[package]] name = "radium" version = "0.7.0" @@ -7594,18 +7348,18 @@ dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.4", + "serde", ] [[package]] name = "rand" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", "serde", - "zerocopy 0.8.23", ] [[package]] @@ -7634,7 +7388,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -7643,7 +7397,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", "serde", ] @@ -7667,9 +7421,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +checksum = "368f01d005bf8fd9b1206fb6fa653e6c4a81ceb1466406b81792d87c5677a58f" dependencies = [ "either", "rayon-core", @@ -7677,9 +7431,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.12.1" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +checksum = "22e18b0f0062d30d4230b2e85ff77fdfe4326feb054b9783a3460d8435c8ab91" dependencies = [ "crossbeam-deque", "crossbeam-utils", @@ -7700,29 +7454,20 @@ dependencies = [ [[package]] 
name = "redb" -version = "2.4.0" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea0a72cd7140de9fc3e318823b883abf819c20d478ec89ce880466dc2ef263c6" +checksum = "8eca1e9d98d5a7e9002d0013e18d5a9b000aee942eb134883a82f06ebffb6c01" dependencies = [ "libc", ] [[package]] name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.10" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", ] [[package]] @@ -7731,16 +7476,36 @@ version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "libredox", "thiserror 1.0.69", ] +[[package]] +name = "ref-cast" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" +dependencies = [ + "ref-cast-impl", +] + +[[package]] +name = "ref-cast-impl" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "regex" -version = "1.11.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" 
dependencies = [ "aho-corasick", "memchr", @@ -7750,9 +7515,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.9" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ "aho-corasick", "memchr", @@ -7761,46 +7526,46 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.11.27" +version = "0.12.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" +checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "bytes", - "encoding_rs", + "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", "hyper-rustls", "hyper-tls", - "ipnet", + "hyper-util", "js-sys", "log", - "mime", "native-tls", - "once_cell", "percent-encoding", "pin-project-lite", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "quinn", + "rustls 0.23.35", + "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration 0.5.1", + "sync_wrapper", "tokio", "tokio-native-tls", - "tokio-rustls 0.24.1", + "tokio-rustls 0.26.4", "tokio-util", + "tower 0.5.2", + "tower-http", "tower-service", "url", "wasm-bindgen", @@ -7808,51 +7573,13 @@ dependencies = [ "wasm-streams", "web-sys", "webpki-roots", - "winreg", -] - -[[package]] -name = "reqwest" 
-version = "0.12.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" -dependencies = [ - "base64 0.22.1", - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "http 1.3.0", - "http-body 1.0.1", - "http-body-util", - "hyper 1.6.0", - "hyper-util", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 1.0.2", - "tokio", - "tower 0.5.2", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "windows-registry", ] [[package]] name = "reqwest-eventsource" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f529a5ff327743addc322af460761dff5b50e0c826b9e6ac44c3195c50bb2026" +checksum = "632c55746dbb44275691640e7b40c907c16a2dc1a5842aa98aaec90da6ec6bde" dependencies = [ "eventsource-stream", "futures-core", @@ -7860,30 +7587,15 @@ dependencies = [ "mime", "nom", "pin-project-lite", - "reqwest 0.11.27", + "reqwest", "thiserror 1.0.69", ] [[package]] name = "resolv-conf" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" -dependencies = [ - "hostname", - "quick-error", -] - -[[package]] -name = "rfc6979" -version = "0.3.1" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac 0.12.1", - "zeroize", -] +checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "rfc6979" @@ -7903,21 +7615,12 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", 
"untrusted", "windows-sys 0.52.0", ] -[[package]] -name = "ripemd" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd124222d17ad93a644ed9d011a40f4fb64aa54275c08cc216524a9ea82fb09f" -dependencies = [ - "digest 0.10.7", -] - [[package]] name = "rlp" version = "0.5.2" @@ -7928,17 +7631,6 @@ dependencies = [ "rustc-hex", ] -[[package]] -name = "rlp-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "rpassword" version = "5.0.1" @@ -7978,28 +7670,29 @@ dependencies = [ [[package]] name = "ruint" -version = "1.14.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78a46eb779843b2c4f21fac5773e25d6d5b7c8f0922876c91541790d2ca27eef" +checksum = "a68df0380e5c9d20ce49534f292a36a7514ae21350726efe1865bdb1fa91d278" dependencies = [ "alloy-rlp", "arbitrary", "ark-ff 0.3.0", "ark-ff 0.4.2", + "ark-ff 0.5.0", "bytes", "fastrlp 0.3.1", "fastrlp 0.4.0", "num-bigint", "num-integer", "num-traits", - "parity-scale-codec 3.7.4", - "primitive-types 0.12.2", + "parity-scale-codec", + "primitive-types", "proptest", "rand 0.8.5", - "rand 0.9.0", + "rand 0.9.2", "rlp", "ruint-macro", - "serde", + "serde_core", "valuable", "zeroize", ] @@ -8041,12 +7734,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - [[package]] name = "rustc-hash" version = "1.1.0" @@ -8080,7 +7767,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ - "semver 1.0.26", + "semver 1.0.27", ] [[package]] @@ 
-8112,7 +7799,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "errno", "libc", "linux-raw-sys 0.4.15", @@ -8121,27 +7808,15 @@ dependencies = [ [[package]] name = "rustix" -version = "1.0.2" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7178faa4b75a30e269c71e61c353ce2748cf3d76f0c44c393f4e60abf49b825" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "errno", "libc", - "linux-raw-sys 0.9.2", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.2", ] [[package]] @@ -8160,38 +7835,29 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" dependencies = [ "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.8", + "rustls-webpki 0.103.8", "subtle", "zeroize", ] [[package]] name = "rustls-native-certs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3" +checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923" dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.3.0", -] - -[[package]] -name = "rustls-pemfile" -version 
= "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", + "security-framework 3.5.1", ] [[package]] @@ -8205,24 +7871,14 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" dependencies = [ "web-time", "zeroize", ] -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.102.8" @@ -8236,9 +7892,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.4" +version = "0.103.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52" dependencies = [ "ring", "rustls-pki-types", @@ -8247,15 +7903,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "rusty-fork" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2" dependencies = [ "fnv", "quick-error", @@ -8295,15 +7951,6 
@@ dependencies = [ "cipher 0.3.0", ] -[[package]] -name = "salsa20" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" -dependencies = [ - "cipher 0.4.4", -] - [[package]] name = "same-file" version = "1.0.6" @@ -8314,45 +7961,45 @@ dependencies = [ ] [[package]] -name = "scale-info" -version = "2.11.6" +name = "schannel" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "346a3b32eba2640d17a9cb5927056b08f3de90f65b72fe09402c2ad07d684d0b" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "cfg-if", - "derive_more 1.0.0", - "parity-scale-codec 3.7.4", - "scale-info-derive", + "windows-sys 0.61.2", ] [[package]] -name = "scale-info-derive" -version = "2.11.6" +name = "scheduled-thread-pool" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6630024bf739e2179b91fb424b28898baf819414262c5d376677dbff1fe7ebf" +checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" dependencies = [ - "proc-macro-crate 3.3.0", - "proc-macro2", - "quote", - "syn 2.0.100", + "parking_lot", ] [[package]] -name = "schannel" -version = "0.1.27" +name = "schemars" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" dependencies = [ - "windows-sys 0.59.0", + "dyn-clone", + "ref-cast", + "serde", + "serde_json", ] [[package]] -name = "scheduled-thread-pool" -version = "0.2.7" +name = "schemars" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" +checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289" 
dependencies = [ - "parking_lot 0.12.3", + "dyn-clone", + "ref-cast", + "serde", + "serde_json", ] [[package]] @@ -8375,58 +8022,44 @@ checksum = "879588d8f90906e73302547e20fffefdd240eb3e0e744e142321f5d49dea0518" dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", - "salsa20 0.8.1", + "salsa20", "sha2 0.9.9", ] [[package]] -name = "scrypt" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f9e24d2b632954ded8ab2ef9fea0a0c769ea56ea98bddbafbad22caeeadf45d" -dependencies = [ - "hmac 0.12.1", - "pbkdf2 0.11.0", - "salsa20 0.10.2", - "sha2 0.10.8", -] - -[[package]] -name = "sct" -version = "0.7.1" +name = "sec1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "ring", - "untrusted", + "base16ct", + "der", + "generic-array", + "pkcs8", + "serdect", + "subtle", + "zeroize", ] [[package]] -name = "sec1" -version = "0.3.0" +name = "secp256k1" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" +checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array 0.14.7", - "pkcs8 0.9.0", - "subtle", - "zeroize", + "bitcoin_hashes", + "rand 0.8.5", + "secp256k1-sys", + "serde", ] [[package]] -name = "sec1" -version = "0.7.3" +name = "secp256k1-sys" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +checksum = "d4387882333d3aa8cb20530a17c69a3752e97837832f34f6dccc760e715001d9" dependencies = [ - "base16ct 0.2.0", - "der 0.7.9", - "generic-array 0.14.7", - "pkcs8 0.10.2", - "subtle", - "zeroize", + "cc", ] [[package]] @@ 
-8435,7 +8068,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -8444,11 +8077,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.3.0" +version = "3.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c" +checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.10.0", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -8457,9 +8090,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" dependencies = [ "core-foundation-sys", "libc", @@ -8476,11 +8109,12 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ "serde", + "serde_core", ] [[package]] @@ -8492,15 +8126,11 @@ dependencies = [ "pest", ] -[[package]] -name = "send_wrapper" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd0b0ec5f1c1ca621c432a25813d8d60c88abe6d3e08a3eb9cf37d97a0fe3d73" - [[package]] name = "sensitive_url" version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb7b0221fa9905eec4163dbf7660b1876cc95663af1deddc3e19ebe49167c58c" dependencies = 
[ "serde", "url", @@ -8508,65 +8138,55 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] [[package]] -name = "serde-aux" -version = "4.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5290c39c5f6992b9dddbda28541d965dba46468294e6018a408fa297e6c602de" -dependencies = [ - "serde", - "serde-value", - "serde_json", -] - -[[package]] -name = "serde-value" -version = "0.7.0" +name = "serde_array_query" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +checksum = "d89c6e82b1005b33d5b2bbc47096800e5ad6b67ef5636f9c13ad29a6935734a7" dependencies = [ - "ordered-float", "serde", + "serde_urlencoded", ] [[package]] -name = "serde_array_query" -version = "0.1.0" +name = "serde_core" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d89c6e82b1005b33d5b2bbc47096800e5ad6b67ef5636f9c13ad29a6935734a7" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ - "serde", - "serde_urlencoded", + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] @@ -8577,7 +8197,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -8592,19 +8212,60 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_with" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10574371d41b0d9b2cff89418eda27da52bcaff2cc8741db26382a77c29131f1" +dependencies = [ + "base64 0.22.1", + "chrono", + "hex", + "indexmap 1.9.3", + "indexmap 2.12.0", + "schemars 0.9.0", + "schemars 1.1.0", + "serde_core", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08a72d8216842fdd57820dc78d840bef99248e35fb2554ff923319e60f2d686b" +dependencies = [ + "darling 0.21.3", + "proc-macro2", + "quote", + "syn 2.0.110", +] + [[package]] name = "serde_yaml" version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.8.0", + "indexmap 2.12.0", "itoa", "ryu", "serde", "unsafe-libyaml", ] +[[package]] +name = "serdect" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a84f14a19e9a014bb9f4512488d9829a68e04ecabffb0f9904cd1ace94598177" +dependencies = [ + "base16ct", + "serde", +] + [[package]] name = "sha1" version = "0.10.6" @@ -8616,18 +8277,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "sha2" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha2" version = "0.9.9" @@ -8638,32 +8287,20 @@ dependencies = [ "cfg-if", "cpufeatures", "digest 0.9.0", - "opaque-debug 0.3.1", + "opaque-debug", ] [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", "digest 0.10.7", ] -[[package]] -name = "sha3" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81199417d4e5de3f04b1e871023acea7389672c4135918f05aa9cbf2f2fa809" -dependencies = [ - "block-buffer 0.9.0", - "digest 0.9.0", - "keccak", - "opaque-debug 0.3.1", -] - [[package]] name = "sha3" version = "0.10.8" @@ -8701,23 +8338,13 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - [[package]] name = "signature" version = "2.2.0" @@ -8732,18 +8359,26 @@ dependencies = [ name = "signing_method" version = "0.1.0" dependencies = [ + "bls", "eth2_keystore", "ethereum_serde_utils", "lockfile", - "parking_lot 0.12.3", - "reqwest 0.11.27", + "parking_lot", + 
"reqwest", "serde", "task_executor", + "tracing", "types", "url", "validator_metrics", ] +[[package]] +name = "simd-adler32" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" + [[package]] name = "similar" version = "2.7.0" @@ -8758,7 +8393,7 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] @@ -8773,44 +8408,44 @@ dependencies = [ "kzg", "logging", "node_test_rig", - "parking_lot 0.12.3", + "parking_lot", "rayon", "sensitive_url", "serde_json", "tokio", "tracing", "tracing-subscriber", + "typenum", "types", ] [[package]] name = "slab" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "slasher" version = "0.1.0" dependencies = [ "bincode", + "bls", "byteorder", - "derivative", + "educe", "ethereum_ssz", "ethereum_ssz_derive", "filesystem", + "fixed_bytes", "flate2", "libmdbx", "lmdb-rkv", "lmdb-rkv-sys", - "lru", + "lru 0.12.5", "maplit", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", "rayon", "redb", "safe_arith", @@ -8821,6 +8456,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", "types", ] @@ -8846,9 +8482,11 @@ name = "slashing_protection" version = "0.1.0" dependencies = [ "arbitrary", + "bls", "eip_3076", "ethereum_serde_utils", "filesystem", + "fixed_bytes", "r2d2", "r2d2_sqlite", "rayon", @@ -8865,17 +8503,18 @@ name = "slot_clock" version = "0.2.0" dependencies = [ "metrics", - "parking_lot 0.12.3", + "parking_lot", "types", ] [[package]] name = "smallvec" -version = "1.14.0" +version = 
"1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "arbitrary", + "serde", ] [[package]] @@ -8897,35 +8536,35 @@ dependencies = [ "rand_core 0.6.4", "ring", "rustc_version 0.4.1", - "sha2 0.10.8", + "sha2 0.10.9", "subtle", ] [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] -name = "spin" -version = "0.9.8" +name = "socket2" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] [[package]] -name = "spki" -version = "0.6.0" +name = "spin" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "spki" @@ -8934,19 +8573,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.9", + "der", ] [[package]] name = "ssz_types" -version = "0.11.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b55bedc9a18ed2860a46d6beb4f4082416ee1d60be0cc364cebdcdddc7afd4" +checksum = 
"1fc20a89bab2dabeee65e9c9eb96892dc222c23254b401e1319b85efd852fa31" dependencies = [ "arbitrary", + "context_deserialize", + "educe", "ethereum_serde_utils", "ethereum_ssz", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_derive", "smallvec", @@ -8956,9 +8597,9 @@ dependencies = [ [[package]] name = "stable_deref_trait" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] name = "state_processing" @@ -8967,16 +8608,18 @@ dependencies = [ "arbitrary", "beacon_chain", "bls", - "derivative", + "educe", "ethereum_hashing", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "int_to_bytes", "integer-sqrt", "itertools 0.10.5", "merkle_proof", "metrics", - "rand 0.9.0", + "milhouse", + "rand 0.9.2", "rayon", "safe_arith", "smallvec", @@ -8985,6 +8628,7 @@ dependencies = [ "tokio", "tracing", "tree_hash", + "typenum", "types", ] @@ -8993,7 +8637,9 @@ name = "state_transition_vectors" version = "0.1.0" dependencies = [ "beacon_chain", + "bls", "ethereum_ssz", + "fixed_bytes", "state_processing", "tokio", "types", @@ -9016,13 +8662,15 @@ dependencies = [ "directory", "ethereum_ssz", "ethereum_ssz_derive", + "fixed_bytes", "itertools 0.10.5", "leveldb", "logging", - "lru", + "lru 0.12.5", "metrics", - "parking_lot 0.12.3", - "rand 0.9.0", + "milhouse", + "parking_lot", + "rand 0.9.2", "redb", "safe_arith", "serde", @@ -9034,6 +8682,7 @@ dependencies = [ "tempfile", "tracing", "tracing-subscriber", + "typenum", "types", "xdelta3", "zstd 0.13.3", @@ -9053,24 +8702,23 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" -version = "0.24.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" -version = "0.24.3" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "quote", - "rustversion", - "syn 1.0.109", + "syn 2.0.110", ] [[package]] @@ -9085,12 +8733,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b986e4a629907f20a2c2a639a75bc22a8b5d99b444e0d83c395f4cb309022bf" dependencies = [ - "darling 0.20.10", + "darling 0.20.11", "itertools 0.13.0", "proc-macro2", "quote", "smallvec", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -9116,9 +8764,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" dependencies = [ "proc-macro2", "quote", @@ -9126,10 +8774,16 @@ dependencies = [ ] [[package]] -name = "sync_wrapper" -version = "0.1.2" +name = "syn-solidity" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "ff790eb176cc81bb8936aed0f7b9f14fc4670069a2d371b3e3b0ecce908b2cb3" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.110", +] [[package]] name = "sync_wrapper" @@ -9142,13 +8796,13 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -9166,36 +8820,15 @@ dependencies = [ "winapi", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.4", - "system-configuration-sys 0.5.0", -] - [[package]] name = "system-configuration" version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" -dependencies = [ - "bitflags 2.9.0", - "core-foundation 0.9.4", - "system-configuration-sys 0.6.0", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "core-foundation-sys", - "libc", + "bitflags 2.10.0", + "core-foundation 0.9.4", + "system-configuration-sys", ] [[package]] @@ -9215,7 +8848,7 @@ dependencies = [ "lighthouse_network", "metrics", "network_utils", - "parking_lot 0.12.3", + "parking_lot", "serde", "sysinfo", "types", @@ -9255,26 +8888,25 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.18.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c317e0a526ee6120d8dabad239c8dadca62b24b6f168914bbbc8e2fb1f0e567" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ - "cfg-if", "fastrand", - "getrandom 0.3.1", + "getrandom 0.3.4", "once_cell", - "rustix 1.0.2", - "windows-sys 0.59.0", + "rustix 1.1.2", + 
"windows-sys 0.61.2", ] [[package]] name = "terminal_size" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c6481c4829e4cc63825e62c49186a34538b7b2750b73b266581ffb612fb5ed" +checksum = "60b8cb979cb11c32ce1603f8137b22262a9d131aaa5c37b5678025f22b8becd0" dependencies = [ - "rustix 1.0.2", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.60.2", ] [[package]] @@ -9288,7 +8920,7 @@ name = "test_random_derive" version = "0.2.0" dependencies = [ "quote", - "syn 1.0.109", + "syn 2.0.110", ] [[package]] @@ -9302,11 +8934,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.17", ] [[package]] @@ -9317,28 +8949,27 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -9352,9 +8983,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.6.0" 
+version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21f216790c8df74ce3ab25b534e0718da5a1916719771d3fec23315c99e468b" +checksum = "661f1f6a57b3a36dc9174a2c10f19513b4866816e13425d3e418b11cc37bc24c" dependencies = [ "libc", "paste", @@ -9363,9 +8994,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" +version = "0.6.1+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" +checksum = "cd8aa5b2ab86a2cefa406d889139c162cbb230092f7d1d7cbc1716405d852a3b" dependencies = [ "cc", "libc", @@ -9373,9 +9004,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" +checksum = "0359b4327f954e0567e69fb191cf1436617748813819c94b8cd4a431422d053a" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -9383,9 +9014,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.39" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad298b01a40a23aac4580b67e3dbedb7cc8402f3592d7f49469de2ea4aecdd8" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -9398,15 +9029,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765c97a5b985b7c11d7bc27fa927dc4fe6af3a6dfb021d28deb60d3bf51e76ef" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.20" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e8093bc3e81c3bc5f7879de09619d06c9a5a5e45ca44dfeeb7225bae38005c5c" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -9435,7 +9066,7 @@ dependencies = [ "pbkdf2 0.11.0", "rand 0.8.5", "rustc-hash 1.1.0", - "sha2 0.10.8", + "sha2 0.10.9", "thiserror 1.0.69", "unicode-normalization", "wasm-bindgen", @@ -9453,9 +9084,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" dependencies = [ "displaydoc", "zerovec", @@ -9473,9 +9104,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -9488,32 +9119,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.0" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9975ea0f48b5aa3972bf2d888c238182458437cc2a19374b81b25cdf1023fb3a" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", "libc", "mio", - "parking_lot 0.12.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2", + "socket2 0.6.1", "tokio-macros", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = 
"af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -9526,16 +9156,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.25.0" @@ -9549,11 +9169,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.23", + "rustls 0.23.35", "tokio", ] @@ -9571,9 +9191,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" dependencies = [ "bytes", "futures-core", @@ -9586,30 +9206,32 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.8" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +dependencies = [ + "serde_core", +] [[package]] name = "toml_edit" -version = "0.19.15" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 
2.8.0", + "indexmap 2.12.0", "toml_datetime", - "winnow 0.5.40", + "toml_parser", + "winnow", ] [[package]] -name = "toml_edit" -version = "0.22.24" +name = "toml_parser" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" +checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" dependencies = [ - "indexmap 2.8.0", - "toml_datetime", - "winnow 0.7.3", + "winnow", ] [[package]] @@ -9623,17 +9245,17 @@ dependencies = [ "axum", "base64 0.22.1", "bytes", - "h2 0.4.8", - "http 1.3.0", + "h2 0.4.12", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", "prost", - "socket2", + "socket2 0.5.10", "tokio", "tokio-stream", "tower 0.4.13", @@ -9651,10 +9273,10 @@ dependencies = [ "async-trait", "base64 0.22.1", "bytes", - "http 1.3.0", + "http 1.3.1", "http-body 1.0.1", "http-body-util", - "hyper 1.6.0", + "hyper 1.8.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -9662,7 +9284,7 @@ dependencies = [ "prost", "rustls-native-certs", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls 0.26.4", "tokio-stream", "tower 0.5.2", "tower-layer", @@ -9698,10 +9320,10 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.8.0", + "indexmap 2.12.0", "pin-project-lite", "slab", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tokio-util", "tower-layer", @@ -9709,6 +9331,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "iri-string", + "pin-project-lite", + "tower 0.5.2", + 
"tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" version = "0.3.3" @@ -9747,35 +9387,25 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -9838,9 +9468,9 @@ dependencies = [ [[package]] name = "tree_hash" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee44f4cef85f88b4dea21c0b1f58320bdf35715cf56d840969487cff00613321" +checksum = "2db21caa355767db4fd6129876e5ae278a8699f4a6959b1e3e7aff610b532d52" dependencies = [ "alloy-primitives", "ethereum_hashing", @@ -9851,14 +9481,14 @@ dependencies = [ [[package]] name = "tree_hash_derive" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bee2ea1551f90040ab0e34b6fb7f2fa3bad8acc925837ac654f2c78a13e3089" +checksum = "711cc655fcbb48384a87dc2bf641b991a15c5ad9afc3caa0b1ab1df3b436f70f" dependencies = [ - "darling 0.20.10", + "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.100", + "syn 
2.0.110", ] [[package]] @@ -9873,9 +9503,9 @@ dependencies = [ [[package]] name = "triomphe" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef8f7726da4807b58ea5c96fdc122f80702030edc33b35aff9190a51148ccc85" +checksum = "dd69c5aa8f924c7519d6372789a74eac5b94fb0f8fcf0d4a97eb0bfc3e785f39" dependencies = [ "serde", "stable_deref_trait", @@ -9889,9 +9519,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] name = "types" @@ -9903,10 +9533,9 @@ dependencies = [ "beacon_chain", "bls", "compare_fields", - "compare_fields_derive", "context_deserialize", "criterion", - "derivative", + "educe", "eth2_interop_keypairs", "ethereum_hashing", "ethereum_serde_utils", @@ -9921,9 +9550,9 @@ dependencies = [ "merkle_proof", "metastruct", "milhouse", - "parking_lot 0.12.3", + "parking_lot", "paste", - "rand 0.9.0", + "rand 0.9.2", "rand_xorshift 0.4.0", "rayon", "regex", @@ -9944,6 +9573,7 @@ dependencies = [ "tracing", "tree_hash", "tree_hash_derive", + "typenum", ] [[package]] @@ -9996,25 +9626,19 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" [[package]] name = "unicode-normalization" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8" dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" - [[package]] name = "unicode-xid" version = "0.2.6" @@ -10062,21 +9686,16 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -10095,22 +9714,24 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", "serde", ] [[package]] name = "uuid" -version = "1.15.1" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.4", + "js-sys", + "wasm-bindgen", ] [[package]] name = "validator_client" -version = "0.3.5" +version = "8.0.1" dependencies = [ "account_utils", "beacon_node_fallback", @@ -10123,13 +9744,13 @@ dependencies = [ "eth2", "fdlimit", 
"graffiti_file", - "hyper 1.6.0", + "hyper 1.8.1", "initialized_validators", "lighthouse_validator_store", "metrics", "monitoring_api", - "parking_lot 0.12.3", - "reqwest 0.11.27", + "parking_lot", + "reqwest", "sensitive_url", "serde", "slashing_protection", @@ -10150,12 +9771,12 @@ version = "0.1.0" dependencies = [ "bls", "deposit_contract", - "derivative", + "educe", "eth2_keystore", "filesystem", "hex", "lockfile", - "rand 0.9.0", + "rand 0.9.2", "tempfile", "tree_hash", "types", @@ -10176,6 +9797,7 @@ dependencies = [ "eth2_keystore", "ethereum_serde_utils", "filesystem", + "fixed_bytes", "futures", "graffiti_file", "health_metrics", @@ -10184,14 +9806,15 @@ dependencies = [ "lighthouse_validator_store", "lighthouse_version", "logging", - "parking_lot 0.12.3", - "rand 0.9.0", + "parking_lot", + "rand 0.9.2", "sensitive_url", "serde", "serde_json", "signing_method", "slashing_protection", "slot_clock", + "ssz_types", "sysinfo", "system_health", "task_executor", @@ -10199,6 +9822,7 @@ dependencies = [ "tokio", "tokio-stream", "tracing", + "typenum", "types", "url", "validator_dir", @@ -10219,7 +9843,7 @@ dependencies = [ "logging", "malloc_utils", "metrics", - "parking_lot 0.12.3", + "parking_lot", "serde", "slot_clock", "tracing", @@ -10236,9 +9860,10 @@ version = "0.1.0" dependencies = [ "account_utils", "beacon_chain", + "bls", "clap", "clap_utils", - "derivative", + "educe", "environment", "eth2", "eth2_network_config", @@ -10276,7 +9901,7 @@ dependencies = [ "futures", "graffiti_file", "logging", - "parking_lot 0.12.3", + "parking_lot", "safe_arith", "slot_clock", "task_executor", @@ -10292,6 +9917,7 @@ dependencies = [ name = "validator_store" version = "0.1.0" dependencies = [ + "bls", "eth2", "slashing_protection", "types", @@ -10379,7 +10005,7 @@ dependencies = [ "mime_guess", "percent-encoding", "pin-project", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "scoped-tls", "serde", "serde_json", @@ -10409,50 +10035,37 @@ dependencies = [ [[package]] name 
= "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.13.3+wasi-0.2.2" +name = "wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log", - "proc-macro2", - "quote", - "syn 2.0.100", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" dependencies = [ "cfg-if", "js-sys", @@ -10463,9 +10076,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10473,22 +10086,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.100", - "wasm-bindgen-backend", + "syn 2.0.110", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" dependencies = [ "unicode-ident", ] @@ -10507,25 +10120,24 @@ dependencies = [ ] [[package]] -name = "wasm-timer" -version = "0.2.5" +name = "wasmtimer" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0ecb0db480561e9a7642b5d3e4187c128914e58aa84330b9493e3eb68c5e7f" +checksum = "1c598d6b99ea013e35844697fc4670d08339d5cda15588f193c6beedd12f644b" dependencies = [ "futures", "js-sys", - "parking_lot 0.11.2", + "parking_lot", "pin-utils", + "slab", "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", ] [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" dependencies = [ "js-sys", "wasm-bindgen", @@ -10547,21 +10159,24 @@ version = "0.1.0" dependencies = [ "account_utils", "async-channel 1.9.0", + 
"bls", "environment", "eth2", "eth2_keystore", "eth2_network_config", + "fixed_bytes", "futures", "initialized_validators", "lighthouse_validator_store", "logging", - "parking_lot 0.12.3", - "reqwest 0.11.27", + "parking_lot", + "reqwest", "serde", "serde_json", "serde_yaml", "slashing_protection", "slot_clock", + "ssz_types", "task_executor", "tempfile", "tokio", @@ -10573,9 +10188,12 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.25.4" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e" +dependencies = [ + "rustls-pki-types", +] [[package]] name = "which" @@ -10597,9 +10215,9 @@ checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" [[package]] name = "widestring" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471" [[package]] name = "winapi" @@ -10619,11 +10237,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -10642,16 +10260,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" -dependencies = [ - "windows-core 0.58.0", - 
"windows-targets 0.52.6", -] - [[package]] name = "windows-acl" version = "0.3.0" @@ -10664,15 +10272,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "windows-core" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" -dependencies = [ - "windows-targets 0.52.6", -] - [[package]] name = "windows-core" version = "0.53.0" @@ -10685,55 +10284,44 @@ dependencies = [ [[package]] name = "windows-core" -version = "0.58.0" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", - "windows-result 0.2.0", - "windows-strings 0.1.0", - "windows-targets 0.52.6", + "windows-link", + "windows-result 0.4.1", + "windows-strings", ] [[package]] name = "windows-implement" -version = "0.58.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "windows-interface" -version = "0.58.0" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] name = "windows-link" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dccfd733ce2b1753b03b6d3c65edf020262ea35e20ccdf3e288043e6dd620e3" - -[[package]] -name = "windows-registry" -version = 
"0.4.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" -dependencies = [ - "windows-result 0.3.1", - "windows-strings 0.3.1", - "windows-targets 0.53.2", -] +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" @@ -10746,37 +10334,18 @@ dependencies = [ [[package]] name = "windows-result" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-result" -version = "0.3.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06374efe858fab7e4f881500e6e86ec8bc28f9462c47e5a9941a0142ad86b189" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" -dependencies = [ - "windows-result 0.2.0", - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-strings" -version = "0.3.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] @@ -10817,6 +10386,24 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = 
"0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -10865,18 +10452,19 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.2" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] @@ -10899,9 +10487,9 @@ checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" @@ -10923,9 +10511,9 @@ checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" +checksum = 
"b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" @@ -10947,9 +10535,9 @@ checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" [[package]] name = "windows_i686_gnullvm" @@ -10959,9 +10547,9 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" @@ -10983,9 +10571,9 @@ checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" @@ -11007,9 +10595,9 @@ checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" @@ -11031,9 +10619,9 @@ checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" 
[[package]] name = "windows_x86_64_gnullvm" -version = "0.53.0" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" @@ -11055,24 +10643,15 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" - -[[package]] -name = "winnow" -version = "0.5.40" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" -dependencies = [ - "memchr", -] +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.3" +version = "0.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" dependencies = [ "memchr", ] @@ -11088,13 +10667,10 @@ dependencies = [ ] [[package]] -name = "wit-bindgen-rt" -version = "0.33.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" -dependencies = [ - "bitflags 2.9.0", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "workspace_members" @@ -11104,42 +10680,11 @@ dependencies = [ "quote", ] -[[package]] -name = "write16" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" - [[package]] name = "writeable" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" - -[[package]] -name = "ws_stream_wasm" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version 0.4.1", - "send_wrapper", - "thiserror 1.0.69", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - -[[package]] -name = "wyz" -version = "0.2.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" [[package]] name = "wyz" @@ -11175,7 +10720,7 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror 2.0.12", + "thiserror 2.0.17", "time", ] @@ -11195,9 +10740,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.25" +version = "0.8.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5b940ebc25896e71dd073bad2dbaa2abfe97b0a391415e22ad1326d9c54e3c4" +checksum = "3ae8337f8a065cfc972643663ea4279e04e7256de865aa66fe25cec5fb912d3f" [[package]] name = "xmltree" @@ -11228,7 +10773,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot", "pin-project", "rand 0.8.5", "static_assertions", @@ -11236,16 +10781,16 @@ dependencies = [ [[package]] name = "yamux" -version = "0.13.4" +version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17610762a1207ee816c6fadc29220904753648aba0a9ed61c7b8336e80a559c4" +checksum = "deab71f2e20691b4728b349c6cee8fc7223880fa67b6b4f92225ec32225447e5" 
dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.3", + "parking_lot", "pin-project", - "rand 0.8.5", + "rand 0.9.2", "static_assertions", "web-time", ] @@ -11261,11 +10806,10 @@ dependencies = [ [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" dependencies = [ - "serde", "stable_deref_trait", "yoke-derive", "zerofrom", @@ -11273,54 +10817,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" -dependencies = [ - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd97444d05a4328b90e75e503a34bad781f14e28a823ad3557f0750df1ebcbc6" -dependencies = [ - "zerocopy-derive 0.8.23", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.23" +version = "0.8.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "6352c01d0edd5db859a63e2605f4ea3183ddbd15e2c4a9e7d32184df75e4f154" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", ] [[package]] @@ -11340,15 +10864,15 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" dependencies = [ "serde", "zeroize_derive", @@ -11362,14 +10886,25 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.110", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", ] [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ "yoke", "zerofrom", @@ -11378,13 +10913,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.100", + "syn 2.0.110", ] [[package]] @@ -11422,7 +10957,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ - "zstd-safe 7.2.3", + "zstd-safe 7.2.4", ] [[package]] @@ -11437,18 +10972,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index ae84d645bb9..d5d1687c764 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,8 +19,6 @@ members = [ "boot_node", "common/account_utils", "common/clap_utils", - "common/compare_fields", - "common/compare_fields_derive", "common/deposit_contract", "common/directory", "common/eip_3076", @@ -41,7 +39,6 @@ members = [ "common/network_utils", "common/oneshot_broadcast", "common/pretty_reqwest_error", - "common/sensitive_url", "common/slot_clock", "common/system_health", "common/target_check", @@ -50,8 +47,6 @@ members = [ "common/validator_dir", "common/warp_utils", "common/workspace_members", - "consensus/context_deserialize/context_deserialize", - "consensus/context_deserialize/context_deserialize_derive", "consensus/fixed_bytes", "consensus/fork_choice", "consensus/int_to_bytes", @@ -96,12 +91,19 @@ resolver = "2" [workspace.package] edition = "2024" +version = "8.0.1" [workspace.dependencies] account_utils = { path = 
"common/account_utils" } -alloy-consensus = { version = "0.14.0", default-features = false } -alloy-primitives = { version = "1.0", features = ["rlp", "getrandom"] } -alloy-rlp = "0.3.4" +alloy-consensus = { version = "1", default-features = false } +alloy-dyn-abi = { version = "1", default-features = false } +alloy-json-abi = { version = "1", default-features = false } +alloy-network = { version = "1", default-features = false } +alloy-primitives = { version = "1", default-features = false, features = ["rlp", "getrandom"] } +alloy-provider = { version = "1", default-features = false, features = ["reqwest"] } +alloy-rlp = { version = "0.3", default-features = false } +alloy-rpc-types-eth = { version = "1", default-features = false, features = ["serde"] } +alloy-signer-local = { version = "1", default-features = false } anyhow = "1" arbitrary = { version = "1", features = ["derive"] } async-channel = "1.9.0" @@ -121,21 +123,17 @@ c-kzg = { version = "2.1", default-features = false } cargo_metadata = "0.19" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } clap_utils = { path = "common/clap_utils" } -compare_fields = { path = "common/compare_fields" } -compare_fields_derive = { path = "common/compare_fields_derive" } +compare_fields = "0.1" console-subscriber = "0.4" -context_deserialize = { path = "consensus/context_deserialize/context_deserialize", features = [ - "all", -] } -context_deserialize_derive = { path = "consensus/context_deserialize/context_deserialize_derive" } +context_deserialize = "0.2" criterion = "0.5" delay_map = "0.4" deposit_contract = { path = "common/deposit_contract" } -derivative = "2" directory = { path = "common/directory" } dirs = "3" discv5 = { version = "0.10", features = ["libp2p"] } doppelganger_service = { path = "validator_client/doppelganger_service" } +educe = "0.6" eip_3076 = { path = "common/eip_3076" } either = "1.9" environment = { path = "lighthouse/environment" } @@ -145,14 +143,10 @@ 
eth2_key_derivation = { path = "crypto/eth2_key_derivation" } eth2_keystore = { path = "crypto/eth2_keystore" } eth2_network_config = { path = "common/eth2_network_config" } eth2_wallet = { path = "crypto/eth2_wallet" } -ethereum_hashing = "0.7.0" +ethereum_hashing = "0.8.0" ethereum_serde_utils = "0.8.0" -ethereum_ssz = "0.9.0" -ethereum_ssz_derive = "0.9.0" -ethers-core = "1" -ethers-middleware = { version = "1", default-features = false } -ethers-providers = { version = "1", default-features = false } -ethers-signers = { version = "1", default-features = false } +ethereum_ssz = { version = "0.10.0", features = ["context_deserialize"] } +ethereum_ssz_derive = "0.10.0" execution_layer = { path = "beacon_node/execution_layer" } exit-future = "0.2" filesystem = { path = "common/filesystem" } @@ -189,7 +183,7 @@ malloc_utils = { path = "common/malloc_utils" } maplit = "1" merkle_proof = { path = "consensus/merkle_proof" } metrics = { path = "common/metrics" } -milhouse = { version = "0.7", default-features = false } +milhouse = { version = "0.9", default-features = false, features = ["context_deserialize"] } mockall = "0.13" mockall_double = "0.3" mockito = "1.5.0" @@ -207,15 +201,14 @@ parking_lot = "0.12" paste = "1" pretty_reqwest_error = { path = "common/pretty_reqwest_error" } prometheus = { version = "0.13", default-features = false } +proptest = "1" proto_array = { path = "consensus/proto_array" } -quickcheck = "1" -quickcheck_macros = "1" quote = "1" r2d2 = "0.8" rand = "0.9.0" rayon = "1.7" regex = "1" -reqwest = { version = "0.11", default-features = false, features = [ +reqwest = { version = "0.12", default-features = false, features = [ "blocking", "json", "stream", @@ -227,7 +220,7 @@ rpds = "0.11" rusqlite = { version = "0.28", features = ["bundled"] } rust_eth_kzg = "0.9" safe_arith = "0.1" -sensitive_url = { path = "common/sensitive_url" } +sensitive_url = { version = "0.1", features = ["serde"] } serde = { version = "1", features = ["derive"] } 
serde_json = "1" serde_repr = "0.1" @@ -239,13 +232,13 @@ slashing_protection = { path = "validator_client/slashing_protection" } slot_clock = { path = "common/slot_clock" } smallvec = { version = "1.11.2", features = ["arbitrary"] } snap = "1" -ssz_types = "0.11.0" +ssz_types = { version = "0.14.0", features = ["context_deserialize", "runtime_types"] } state_processing = { path = "consensus/state_processing" } store = { path = "beacon_node/store" } -strum = { version = "0.24", features = ["derive"] } +strum = { version = "0.27", features = ["derive"] } superstruct = "0.10" swap_or_not_shuffle = { path = "consensus/swap_or_not_shuffle" } -syn = "1" +syn = "2" sysinfo = "0.26" system_health = { path = "common/system_health" } task_executor = { path = "common/task_executor" } @@ -264,8 +257,9 @@ tracing-core = "0.1" tracing-log = "0.2" tracing-opentelemetry = "0.31.0" tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] } -tree_hash = "0.10.0" -tree_hash_derive = "0.10.0" +tree_hash = "0.12.0" +tree_hash_derive = "0.12.0" +typenum = "1" types = { path = "consensus/types" } url = "2" uuid = { version = "0.8", features = ["serde", "v4"] } @@ -291,13 +285,6 @@ lto = "fat" codegen-units = 1 incremental = false -[profile.reproducible] -inherits = "release" -debug = false -panic = "abort" -codegen-units = 1 -overflow-checks = true - [profile.release-debug] inherits = "release" debug = true diff --git a/Dockerfile b/Dockerfile index f925836e48e..8cc20ab000f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,13 +1,19 @@ FROM rust:1.88.0-bullseye AS builder RUN apt-get update && apt-get -y upgrade && apt-get install -y cmake libclang-dev -COPY . lighthouse ARG FEATURES ARG PROFILE=release ARG CARGO_USE_GIT_CLI=true ENV FEATURES=$FEATURES ENV PROFILE=$PROFILE ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_USE_GIT_CLI -RUN cd lighthouse && make +ENV CARGO_INCREMENTAL=1 + +WORKDIR /lighthouse +COPY . . +# Persist the registry and target file across builds. 
See: https://docs.docker.com/build/cache/optimize/#use-cache-mounts +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/lighthouse/target \ + make FROM ubuntu:22.04 RUN apt-get update && apt-get -y upgrade && apt-get install -y --no-install-recommends \ diff --git a/Dockerfile.reproducible b/Dockerfile.reproducible index 24ba5a58a9b..903515373f8 100644 --- a/Dockerfile.reproducible +++ b/Dockerfile.reproducible @@ -3,42 +3,22 @@ ARG RUST_IMAGE="rust:1.88-bullseye@sha256:8e3c421122bf4cd3b2a866af41a4dd52d87ad9 FROM ${RUST_IMAGE} AS builder # Install specific version of the build dependencies -RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 cmake=3.18.4-2+deb11u1 +RUN apt-get update && apt-get install -y libclang-dev=1:11.0-51+nmu5 cmake=3.18.4-2+deb11u1 libjemalloc-dev=5.2.1-3 -# Add target architecture argument with default value ARG RUST_TARGET="x86_64-unknown-linux-gnu" # Copy the project to the container -COPY . /app +COPY ./ /app WORKDIR /app -# Get the latest commit timestamp and set SOURCE_DATE_EPOCH (default it to 0 if not passed) -ARG SOURCE_DATE=0 - -# Set environment variables for reproducibility -ARG RUSTFLAGS="-C link-arg=-Wl,--build-id=none -C metadata='' --remap-path-prefix $(pwd)=." 
-ENV SOURCE_DATE_EPOCH=$SOURCE_DATE \ - CARGO_INCREMENTAL=0 \ - LC_ALL=C \ - TZ=UTC \ - RUSTFLAGS="${RUSTFLAGS}" - -# Set the default features if not provided -ARG FEATURES="gnosis,slasher-lmdb,slasher-mdbx,slasher-redb,jemalloc" - -# Set the default profile if not provided -ARG PROFILE="reproducible" - # Build the project with the reproducible settings -RUN cargo build --bin lighthouse \ - --features "${FEATURES}" \ - --profile "${PROFILE}" \ - --locked \ - --target "${RUST_TARGET}" +RUN make build-reproducible -RUN mv /app/target/${RUST_TARGET}/${PROFILE}/lighthouse /lighthouse +# Move the binary to a standard location +RUN mv /app/target/${RUST_TARGET}/release/lighthouse /lighthouse # Create a minimal final image with just the binary FROM gcr.io/distroless/cc-debian12:nonroot-6755e21ccd99ddead6edc8106ba03888cbeed41a COPY --from=builder /lighthouse /lighthouse + ENTRYPOINT [ "/lighthouse" ] diff --git a/Makefile b/Makefile index b9f93942f6f..9d08c3ebe18 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ EF_TESTS = "testing/ef_tests" STATE_TRANSITION_VECTORS = "testing/state_transition_vectors" EXECUTION_ENGINE_INTEGRATION = "testing/execution_engine_integration" -GIT_TAG := $(shell git describe --tags --candidates 1) +GIT_TAG = $(shell git describe --tags --candidates 1) BIN_DIR = "bin" X86_64_TAG = "x86_64-unknown-linux-gnu" @@ -30,12 +30,13 @@ TEST_FEATURES ?= # Cargo profile for regular builds. PROFILE ?= release -# List of all hard forks. This list is used to set env variables for several tests so that +# List of all hard forks up to gloas. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair bellatrix capella deneb electra fulu gloas +# TODO(EIP-7732) Remove this once we extend network tests to support gloas and use RECENT_FORKS instead +RECENT_FORKS_BEFORE_GLOAS=electra fulu # List of all recent hard forks. 
This list is used to set env variables for http_api tests -RECENT_FORKS=electra fulu +RECENT_FORKS=electra fulu gloas # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= @@ -85,36 +86,67 @@ build-lcli-aarch64: build-lcli-riscv64: cross build --bin lcli --target riscv64gc-unknown-linux-gnu --features "portable" --profile "$(CROSS_PROFILE)" --locked -# extracts the current source date for reproducible builds -SOURCE_DATE := $(shell git log -1 --pretty=%ct) +# Environment variables for reproducible builds +# Initialize RUSTFLAGS +RUST_BUILD_FLAGS = +# Remove build ID from the binary to ensure reproducibility across builds +RUST_BUILD_FLAGS += -C link-arg=-Wl,--build-id=none +# Remove metadata hash from symbol names to ensure reproducible builds +RUST_BUILD_FLAGS += -C metadata='' -# Default image for x86_64 +# Set timestamp from last git commit for reproducible builds +SOURCE_DATE ?= $(shell git log -1 --pretty=%ct) + +# Disable incremental compilation to avoid non-deterministic artifacts +CARGO_INCREMENTAL_VAL = 0 +# Set C locale for consistent string handling and sorting +LOCALE_VAL = C +# Set UTC timezone for consistent time handling across builds +TZ_VAL = UTC + +# Features for reproducible builds +FEATURES_REPRODUCIBLE = $(CROSS_FEATURES),jemalloc-unprefixed + +# Derive the architecture-specific library path from RUST_TARGET +JEMALLOC_LIB_ARCH = $(word 1,$(subst -, ,$(RUST_TARGET))) +JEMALLOC_OVERRIDE = /usr/lib/$(JEMALLOC_LIB_ARCH)-linux-gnu/libjemalloc.a + +# Default target architecture +RUST_TARGET ?= x86_64-unknown-linux-gnu + +# Default images for different architectures RUST_IMAGE_AMD64 ?= rust:1.88-bullseye@sha256:8e3c421122bf4cd3b2a866af41a4dd52d87ad9e315fd2cb5100e87a7187a9816 +RUST_IMAGE_ARM64 ?= rust:1.88-bullseye@sha256:8b22455a7ce2adb1355067638284ee99d21cc516fab63a96c4514beaf370aa94 -# Reproducible build for x86_64 -build-reproducible-x86_64: +.PHONY: build-reproducible +build-reproducible: ## Build the lighthouse binary into `target` directory with 
reproducible builds + SOURCE_DATE_EPOCH=$(SOURCE_DATE) \ + RUSTFLAGS="${RUST_BUILD_FLAGS} --remap-path-prefix $$(pwd)=." \ + CARGO_INCREMENTAL=${CARGO_INCREMENTAL_VAL} \ + LC_ALL=${LOCALE_VAL} \ + TZ=${TZ_VAL} \ + JEMALLOC_OVERRIDE=${JEMALLOC_OVERRIDE} \ + cargo build --bin lighthouse --features "$(FEATURES_REPRODUCIBLE)" --profile "$(PROFILE)" --locked --target $(RUST_TARGET) + +.PHONY: build-reproducible-x86_64 +build-reproducible-x86_64: ## Build reproducible x86_64 Docker image DOCKER_BUILDKIT=1 docker build \ --build-arg RUST_TARGET="x86_64-unknown-linux-gnu" \ --build-arg RUST_IMAGE=$(RUST_IMAGE_AMD64) \ - --build-arg SOURCE_DATE=$(SOURCE_DATE) \ -f Dockerfile.reproducible \ -t lighthouse:reproducible-amd64 . -# Default image for arm64 -RUST_IMAGE_ARM64 ?= rust:1.88-bullseye@sha256:8b22455a7ce2adb1355067638284ee99d21cc516fab63a96c4514beaf370aa94 - -# Reproducible build for aarch64 -build-reproducible-aarch64: +.PHONY: build-reproducible-aarch64 +build-reproducible-aarch64: ## Build reproducible aarch64 Docker image DOCKER_BUILDKIT=1 docker build \ --platform linux/arm64 \ --build-arg RUST_TARGET="aarch64-unknown-linux-gnu" \ --build-arg RUST_IMAGE=$(RUST_IMAGE_ARM64) \ - --build-arg SOURCE_DATE=$(SOURCE_DATE) \ -f Dockerfile.reproducible \ -t lighthouse:reproducible-arm64 . -# Build both architectures -build-reproducible-all: build-reproducible-x86_64 build-reproducible-aarch64 +.PHONY: build-reproducible-all +build-reproducible-all: build-reproducible-x86_64 build-reproducible-aarch64 ## Build both x86_64 and aarch64 reproducible Docker images # Create a `.tar.gz` containing a binary for a specific target. define tarball_release_binary @@ -171,20 +203,21 @@ run-ef-tests: ./$(EF_TESTS)/check_all_files_accessed.py $(EF_TESTS)/.accessed_file_log.txt $(EF_TESTS)/consensus-spec-tests # Run the tests in the `beacon_chain` crate for all known forks. 
-test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(FORKS)) +# TODO(EIP-7732) Extend to support gloas by using RECENT_FORKS instead +test-beacon-chain: $(patsubst %,test-beacon-chain-%,$(RECENT_FORKS_BEFORE_GLOAS)) test-beacon-chain-%: env FORK_NAME=$* cargo nextest run --release --features "fork_from_env,slasher/lmdb,$(TEST_FEATURES)" -p beacon_chain # Run the tests in the `http_api` crate for recent forks. -test-http-api: $(patsubst %,test-http-api-%,$(RECENT_FORKS)) +test-http-api: $(patsubst %,test-http-api-%,$(RECENT_FORKS_BEFORE_GLOAS)) test-http-api-%: env FORK_NAME=$* cargo nextest run --release --features "beacon_chain/fork_from_env" -p http_api # Run the tests in the `operation_pool` crate for all known forks. -test-op-pool: $(patsubst %,test-op-pool-%,$(FORKS)) +test-op-pool: $(patsubst %,test-op-pool-%,$(RECENT_FORKS_BEFORE_GLOAS)) test-op-pool-%: env FORK_NAME=$* cargo nextest run --release \ @@ -192,7 +225,8 @@ test-op-pool-%: -p operation_pool # Run the tests in the `network` crate for all known forks. -test-network: $(patsubst %,test-network-%,$(FORKS)) +# TODO(EIP-7732) Extend to support gloas by using RECENT_FORKS instead +test-network: $(patsubst %,test-network-%,$(RECENT_FORKS_BEFORE_GLOAS)) test-network-%: env FORK_NAME=$* cargo nextest run --release \ @@ -292,6 +326,15 @@ install-audit: audit-CI: cargo audit +# Runs cargo deny (check for banned crates, duplicate versions, and source restrictions) +deny: install-deny deny-CI + +install-deny: + cargo install --force cargo-deny --version 0.18.2 + +deny-CI: + cargo deny check bans sources + # Runs `cargo vendor` to make sure dependencies can be vendored for packaging, reproducibility and archival purpose. 
vendor: cargo vendor diff --git a/account_manager/Cargo.toml b/account_manager/Cargo.toml index 071e2681dd1..8dd50cbc6ee 100644 --- a/account_manager/Cargo.toml +++ b/account_manager/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "account_manager" -version = "0.3.5" +version = { workspace = true } authors = [ "Paul Hauner ", "Luke Anderson ", diff --git a/account_manager/src/validator/slashing_protection.rs b/account_manager/src/validator/slashing_protection.rs index 18064b990f3..96098ccbbd1 100644 --- a/account_manager/src/validator/slashing_protection.rs +++ b/account_manager/src/validator/slashing_protection.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use environment::Environment; use slashing_protection::{ @@ -7,7 +8,7 @@ use slashing_protection::{ use std::fs::File; use std::path::PathBuf; use std::str::FromStr; -use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Slot}; pub const CMD: &str = "slashing-protection"; pub const IMPORT_CMD: &str = "import"; diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index ea834357c73..5352814dd5d 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "8.0.1" +version = { workspace = true } authors = [ "Paul Hauner ", "Age Manning ( let blobs = (0..num_of_blobs) .map(|_| Blob::::default()) .collect::>() - .into(); - let proofs = vec![KzgProof::empty(); num_of_blobs * E::number_of_columns()].into(); + .try_into() + .unwrap(); + let proofs = vec![KzgProof::empty(); num_of_blobs * E::number_of_columns()] + .try_into() + .unwrap(); (signed_block, blobs, proofs) } diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index f740d221c0b..faa396966ff 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -57,7 +57,7 @@ use 
state_processing::{ }; use std::borrow::Cow; use strum::AsRefStr; -use tracing::debug; +use tracing::{debug, error}; use tree_hash::TreeHash; use types::{ Attestation, AttestationData, AttestationRef, BeaconCommittee, @@ -267,6 +267,14 @@ pub enum Error { /// We were unable to process this attestation due to an internal error. It's unclear if the /// attestation is valid. BeaconChainError(Box), + /// A critical error occurred while converting SSZ types. + /// This can only occur when a VariableList was not able to be constructed from a single + /// attestation. + /// + /// ## Peer scoring + /// + /// The peer has sent an invalid message. + SszTypesError(ssz_types::Error), } impl From for Error { @@ -275,6 +283,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: ssz_types::Error) -> Self { + Self::SszTypesError(e) + } +} + /// Used to avoid double-checking signatures. #[derive(Copy, Clone)] enum CheckAttestationSignature { @@ -442,7 +456,18 @@ fn process_slash_info( .spec .fork_name_at_slot::(attestation.data.slot); - let indexed_attestation = attestation.to_indexed(fork_name); + let indexed_attestation = match attestation.to_indexed(fork_name) { + Ok(indexed) => indexed, + Err(e) => { + error!( + attestation_root = ?attestation.data.tree_hash_root(), + error = ?e, + "Unable to construct VariableList from a single attestation. 
\ + This indicates a serious bug in SSZ handling" + ); + return Error::SszTypesError(e); + } + }; (indexed_attestation, true, err) } SignatureNotCheckedIndexed(indexed, err) => (indexed, true, err), @@ -932,7 +957,9 @@ impl<'a, T: BeaconChainTypes> IndexedUnaggregatedAttestation<'a, T> { .spec .fork_name_at_slot::(attestation.data.slot); - let indexed_attestation = attestation.to_indexed(fork_name); + let indexed_attestation = attestation + .to_indexed(fork_name) + .map_err(|e| SignatureNotCheckedSingle(attestation, Error::SszTypesError(e)))?; let validator_index = match Self::verify_middle_checks(attestation, chain) { Ok(t) => t, diff --git a/beacon_node/beacon_chain/src/attester_cache.rs b/beacon_node/beacon_chain/src/attester_cache.rs index f879adfb498..26a33898129 100644 --- a/beacon_node/beacon_chain/src/attester_cache.rs +++ b/beacon_node/beacon_chain/src/attester_cache.rs @@ -10,14 +10,15 @@ //! and penalties can be computed and the `state.current_justified_checkpoint` can be updated. 
use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use fixed_bytes::FixedBytesExtended; use parking_lot::RwLock; use state_processing::state_advance::{Error as StateAdvanceError, partial_state_advance}; use std::collections::HashMap; use std::ops::Range; use types::{ - BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - Hash256, RelativeEpoch, Slot, - attestation::Error as AttestationError, + BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, RelativeEpoch, + Slot, + attestation::AttestationError, beacon_state::{ compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count, }, diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index c816a0b29f3..a462376cc03 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -685,13 +685,13 @@ impl From for BeaconChainError { mod tests { use crate::beacon_block_streamer::{BeaconBlockStreamer, CheckCaches}; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; + use bls::Keypair; use execution_layer::test_utils::Block; + use fixed_bytes::FixedBytesExtended; use std::sync::Arc; use std::sync::LazyLock; use tokio::sync::mpsc; - use types::{ - ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MinimalEthSpec, Slot, - }; + use types::{ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot}; const VALIDATOR_COUNT: usize = 48; @@ -715,8 +715,9 @@ mod tests { harness } + // TODO(EIP-7732) Extend this test for gloas #[tokio::test] - async fn check_all_blocks_from_altair_to_gloas() { + async fn check_all_blocks_from_altair_to_fulu() { let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; let num_epochs = 12; let bellatrix_fork_epoch = 2usize; @@ -724,7 +725,6 @@ mod tests { let deneb_fork_epoch = 6usize; let electra_fork_epoch = 8usize; 
let fulu_fork_epoch = 10usize; - let gloas_fork_epoch = 12usize; let num_blocks_produced = num_epochs * slots_per_epoch; let mut spec = test_spec::(); @@ -734,7 +734,6 @@ mod tests { spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64)); spec.fulu_fork_epoch = Some(Epoch::new(fulu_fork_epoch as u64)); - spec.gloas_fork_epoch = Some(Epoch::new(gloas_fork_epoch as u64)); let spec = Arc::new(spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone()); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 3e02baf9017..46ba14f596b 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -33,7 +33,7 @@ use crate::events::ServerSentEventHandler; use crate::execution_payload::{NotifyExecutionLayer, PreparePayloadHandle, get_execution_payload}; use crate::fetch_blobs::EngineGetBlobsOutput; use crate::fork_choice_signal::{ForkChoiceSignalRx, ForkChoiceSignalTx, ForkChoiceWaitResult}; -use crate::graffiti_calculator::GraffitiCalculator; +use crate::graffiti_calculator::{GraffitiCalculator, GraffitiSettings}; use crate::kzg_utils::reconstruct_blobs; use crate::light_client_finality_update_verification::{ Error as LightClientFinalityUpdateError, VerifiedLightClientFinalityUpdate, @@ -74,6 +74,8 @@ use crate::{ AvailabilityPendingExecutedBlock, BeaconChainError, BeaconForkChoiceStore, BeaconSnapshot, CachedHead, metrics, }; +use bls::{PublicKey, PublicKeyBytes, Signature}; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{ EventKind, SseBlobSidecar, SseBlock, SseDataColumnSidecar, SseExtendedPayloadAttributes, }; @@ -81,6 +83,7 @@ use execution_layer::{ BlockProposalContents, BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, FailedCondition, PayloadAttributes, PayloadStatus, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ 
AttestationFromBlock, ExecutionStatus, ForkChoice, ForkchoiceUpdateParameters, InvalidationOperation, PayloadVerificationStatus, ResetPayloadStatuses, @@ -883,6 +886,12 @@ impl BeaconChain { return Ok(None); } + // Fast-path for the split slot (which usually corresponds to the finalized slot). + let split = self.store.get_split_info(); + if request_slot == split.slot { + return Ok(Some(split.state_root)); + } + // Try an optimized path of reading the root directly from the head state. let fast_lookup: Option = self.with_head(|head| { if head.beacon_block.slot() <= request_slot { @@ -1242,7 +1251,7 @@ impl BeaconChain { let num_required_columns = T::EthSpec::number_of_columns() / 2; let reconstruction_possible = columns.len() >= num_required_columns; if reconstruction_possible { - reconstruct_blobs(&self.kzg, &columns, None, &block, &self.spec) + reconstruct_blobs(&self.kzg, columns, None, &block, &self.spec) .map(Some) .map_err(Error::FailedToReconstructBlobs) } else { @@ -1406,10 +1415,10 @@ impl BeaconChain { /// /// Returns `(block_root, block_slot)`. 
pub fn heads(&self) -> Vec<(Hash256, Slot)> { - self.canonical_head - .fork_choice_read_lock() + let fork_choice = self.canonical_head.fork_choice_read_lock(); + fork_choice .proto_array() - .heads_descended_from_finalization::() + .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()) .iter() .map(|node| (node.root, node.slot)) .collect() @@ -4484,7 +4493,7 @@ impl BeaconChain { self: &Arc, randao_reveal: Signature, slot: Slot, - validator_graffiti: Option, + graffiti_settings: GraffitiSettings, verification: ProduceBlockVerification, builder_boost_factor: Option, block_production_version: BlockProductionVersion, @@ -4518,7 +4527,7 @@ impl BeaconChain { state_root_opt, slot, randao_reveal, - validator_graffiti, + graffiti_settings, verification, builder_boost_factor, block_production_version, @@ -5051,7 +5060,7 @@ impl BeaconChain { state_root_opt: Option, produce_at_slot: Slot, randao_reveal: Signature, - validator_graffiti: Option, + graffiti_settings: GraffitiSettings, verification: ProduceBlockVerification, builder_boost_factor: Option, block_production_version: BlockProductionVersion, @@ -5062,7 +5071,7 @@ impl BeaconChain { let chain = self.clone(); let graffiti = self .graffiti_calculator - .get_graffiti(validator_graffiti) + .get_graffiti(graffiti_settings) .await; let span = Span::current(); let mut partial_beacon_block = self @@ -5483,11 +5492,21 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: 
deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, _phantom: PhantomData, }, }), @@ -5504,11 +5523,21 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, _phantom: PhantomData, @@ -5531,11 +5560,21 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: 
block_proposal_contents @@ -5563,18 +5602,30 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: block_proposal_contents .to_payload() .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, }, }), None, @@ -5602,17 +5653,29 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_base.into(), - attestations: attestations_base.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_base + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + 
.map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, blob_kzg_commitments: kzg_commitments.ok_or( BlockProductionError::MissingKzgCommitment( "Kzg commitments missing from block contents".to_string(), @@ -5645,17 +5708,29 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_electra.into(), - attestations: attestations_electra.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + proposer_slashings: proposer_slashings + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, execution_requests: maybe_requests @@ -5687,59 +5762,29 @@ impl BeaconChain { randao_reveal, eth1_data, graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: 
attester_slashings_electra.into(), - attestations: attestations_electra.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), - sync_aggregate: sync_aggregate - .ok_or(BlockProductionError::MissingSyncAggregate)?, - execution_payload: payload + proposer_slashings: proposer_slashings .try_into() - .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - bls_to_execution_changes: bls_to_execution_changes.into(), - blob_kzg_commitments: kzg_commitments - .ok_or(BlockProductionError::InvalidPayloadFork)?, - execution_requests: maybe_requests - .ok_or(BlockProductionError::MissingExecutionRequests)?, - }, - }), - maybe_blobs_and_proofs, - execution_payload_value, - ) - } - BeaconState::Gloas(_) => { - let ( - payload, - kzg_commitments, - maybe_blobs_and_proofs, - maybe_requests, - execution_payload_value, - ) = block_contents - .ok_or(BlockProductionError::MissingExecutionPayload)? - .deconstruct(); - - ( - BeaconBlock::Gloas(BeaconBlockGloas { - slot, - proposer_index, - parent_root, - state_root: Hash256::zero(), - body: BeaconBlockBodyGloas { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings: proposer_slashings.into(), - attester_slashings: attester_slashings_electra.into(), - attestations: attestations_electra.into(), - deposits: deposits.into(), - voluntary_exits: voluntary_exits.into(), + .map_err(BlockProductionError::SszTypesError)?, + attester_slashings: attester_slashings_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + attestations: attestations_electra + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + deposits: deposits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, + voluntary_exits: voluntary_exits + .try_into() + .map_err(BlockProductionError::SszTypesError)?, sync_aggregate: sync_aggregate .ok_or(BlockProductionError::MissingSyncAggregate)?, execution_payload: payload .try_into() .map_err(|_| BlockProductionError::InvalidPayloadFork)?, - 
bls_to_execution_changes: bls_to_execution_changes.into(), + bls_to_execution_changes: bls_to_execution_changes + .try_into() + .map_err(BlockProductionError::SszTypesError)?, blob_kzg_commitments: kzg_commitments .ok_or(BlockProductionError::InvalidPayloadFork)?, execution_requests: maybe_requests @@ -5750,6 +5795,7 @@ impl BeaconChain { execution_payload_value, ) } + BeaconState::Gloas(_) => return Err(BlockProductionError::GloasNotImplemented), }; let block = SignedBeaconBlock::from_block( diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs index 440388661c2..60487f9c469 100644 --- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs +++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs @@ -5,7 +5,8 @@ //! reads when fork choice requires the validator balances of the justified state. use crate::{BeaconSnapshot, metrics}; -use derivative::Derivative; +use educe::Educe; +use fixed_bytes::FixedBytesExtended; use fork_choice::ForkChoiceStore; use proto_array::JustifiedBalances; use safe_arith::ArithError; @@ -17,7 +18,7 @@ use store::{Error as StoreError, HotColdDB, ItemStore}; use superstruct::superstruct; use types::{ AbstractExecPayload, BeaconBlockRef, BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, Hash256, Slot, + Hash256, Slot, }; #[derive(Debug)] @@ -127,10 +128,10 @@ impl BalancesCache { /// Implements `fork_choice::ForkChoiceStore` in order to provide a persistent backing to the /// `fork_choice::ForkChoice` struct. 
-#[derive(Debug, Derivative)] -#[derivative(PartialEq(bound = "E: EthSpec, Hot: ItemStore, Cold: ItemStore"))] +#[derive(Debug, Educe)] +#[educe(PartialEq(bound(E: EthSpec, Hot: ItemStore, Cold: ItemStore)))] pub struct BeaconForkChoiceStore, Cold: ItemStore> { - #[derivative(PartialEq = "ignore")] + #[educe(PartialEq(ignore))] store: Arc>, balances_cache: BalancesCache, time: Slot, diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs index bd6460eba7d..a923d657a86 100644 --- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs +++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs @@ -18,10 +18,9 @@ use state_processing::state_advance::partial_state_advance; use std::num::NonZeroUsize; use std::sync::Arc; use tracing::instrument; +use typenum::Unsigned; use types::non_zero_usize::new_non_zero_usize; -use types::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned, -}; +use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot}; /// The number of sets of proposer indices that should be cached. const CACHE_SIZE: NonZeroUsize = new_non_zero_usize(16); diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 53f2eff0ca3..874673b52e8 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -1,4 +1,4 @@ -use derivative::Derivative; +use educe::Educe; use slot_clock::SlotClock; use std::marker::PhantomData; use std::sync::Arc; @@ -245,8 +245,8 @@ impl GossipVerifiedBlob { /// Wrapper over a `BlobSidecar` for which we have completed kzg verification. /// i.e. `verify_blob_kzg_proof(blob, commitment, proof) == true`. 
-#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] +#[derive(Debug, Educe, Clone, Encode, Decode)] +#[educe(PartialEq, Eq)] #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedBlob { blob: Arc>, diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index bd1adb7e407..e8d4c75dcee 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -294,7 +294,7 @@ impl BlockTimesCache { #[cfg(test)] mod test { use super::*; - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; #[test] fn observed_time_uses_minimum() { diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 5078e24a51c..bca8d2bc57b 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -66,7 +66,8 @@ use crate::{ beacon_chain::{BeaconForkChoice, ForkChoiceError}, metrics, }; -use derivative::Derivative; +use bls::{PublicKey, PublicKeyBytes}; +use educe::Educe; use eth2::types::{BlockGossip, EventKind}; use execution_layer::PayloadStatus; pub use fork_choice::{AttestationFromBlock, PayloadVerificationStatus}; @@ -97,7 +98,7 @@ use tracing::{Instrument, Span, debug, debug_span, error, info_span, instrument} use types::{ BeaconBlockRef, BeaconState, BeaconStateError, BlobsList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, ExecutionBlockHash, FullPayload, Hash256, InconsistentFork, KzgProofs, - PublicKey, PublicKeyBytes, RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, data_column_sidecar::DataColumnSidecarError, }; @@ -689,8 +690,8 @@ pub fn signature_verify_chain_segment( /// A wrapper around a `SignedBeaconBlock` that indicates it has been approved for re-gossiping on /// the p2p network. 
-#[derive(Derivative)] -#[derivative(Debug(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Debug(bound(T: BeaconChainTypes)))] pub struct GossipVerifiedBlock { pub block: Arc>, pub block_root: Hash256, @@ -1164,9 +1165,9 @@ impl SignatureVerifiedBlock { block_root: Hash256, chain: &BeaconChain, ) -> Result> { - let header = block.signed_block_header(); + let arc_block = block.block_cloned(); Self::new(block, block_root, chain) - .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) + .map_err(|e| BlockSlashInfo::from_early_error_block(arc_block.signed_block_header(), e)) } /// Finishes signature verification on the provided `GossipVerifedBlock`. Does not re-verify @@ -1221,9 +1222,13 @@ impl SignatureVerifiedBlock { from: GossipVerifiedBlock, chain: &BeaconChain, ) -> Result> { - let header = from.block.signed_block_header(); - Self::from_gossip_verified_block(from, chain) - .map_err(|e| BlockSlashInfo::from_early_error_block(header, e)) + let block = from.block.clone(); + Self::from_gossip_verified_block(from, chain).map_err(|e| { + // Lazily create the header from the block in case of error. Computing the header + // involves some hashing and takes ~13ms which we DO NOT want to do on the hot path of + // block processing (prior to sending newPayload pre-Gloas). + BlockSlashInfo::from_early_error_block(block.signed_block_header(), e) + }) } pub fn block_root(&self) -> Hash256 { @@ -1248,12 +1253,12 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc chain: &Arc>, notify_execution_layer: NotifyExecutionLayer, ) -> Result, BlockSlashInfo> { - let header = self.block.signed_block_header(); + let arc_block = self.block.block_cloned(); let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) } else { load_parent(self.block, chain) - .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? + .map_err(|e| BlockSlashInfo::SignatureValid(arc_block.signed_block_header(), e))? 
}; ExecutionPendingBlock::from_signature_verified_components( @@ -1264,7 +1269,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc chain, notify_execution_layer, ) - .map_err(|e| BlockSlashInfo::SignatureValid(header, e)) + .map_err(|e| BlockSlashInfo::SignatureValid(arc_block.signed_block_header(), e)) } fn block(&self) -> &SignedBeaconBlock { diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 1a0b188fdcd..5978e97c4d9 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -2,7 +2,7 @@ use crate::data_availability_checker::AvailabilityCheckError; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; use crate::{PayloadVerificationOutcome, get_block_root}; -use derivative::Derivative; +use educe::Educe; use ssz_types::VariableList; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; @@ -26,8 +26,8 @@ use types::{ /// Note: We make a distinction over blocks received over gossip because /// in a post-deneb world, the blobs corresponding to a given block that are received /// over rpc do not contain the proposer signature for dos resistance. -#[derive(Clone, Derivative)] -#[derivative(Hash(bound = "E: EthSpec"))] +#[derive(Clone, Educe)] +#[educe(Hash(bound(E: EthSpec)))] pub struct RpcBlock { block_root: Hash256, block: RpcBlockInner, @@ -80,8 +80,8 @@ impl RpcBlock { /// Note: This variant is intentionally private because we want to safely construct the /// internal variants after applying consistency checks to ensure that the block and blobs /// are consistent with respect to each other. 
-#[derive(Debug, Clone, Derivative)] -#[derivative(Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Educe)] +#[educe(Hash(bound(E: EthSpec)))] enum RpcBlockInner { /// Single block lookup response. This should potentially hit the data availability cache. Block(Arc>), diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index ef438b16e0f..58dbf1c35e8 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -21,7 +21,9 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, ServerSentEventHandler, }; +use bls::Signature; use execution_layer::ExecutionLayer; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; use kzg::Kzg; @@ -43,7 +45,7 @@ use tracing::{debug, error, info}; use types::data_column_custody_group::CustodyIndex; use types::{ BeaconBlock, BeaconState, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecarList, - Epoch, EthSpec, FixedBytesExtended, Hash256, Signature, SignedBeaconBlock, Slot, + Epoch, EthSpec, Hash256, SignedBeaconBlock, Slot, }; /// An empty struct used to "witness" all the `BeaconChainTypes` traits. 
It has no user-facing diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index aa232502969..776fb50f619 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -12,6 +12,7 @@ use crate::{BeaconChainTypes, BlockProcessStatus}; use lighthouse_tracing::SPAN_PENDING_COMPONENTS; use lru::LruCache; use parking_lot::{MappedRwLockReadGuard, RwLock, RwLockReadGuard, RwLockWriteGuard}; +use ssz_types::{RuntimeFixedVector, RuntimeVariableList}; use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; @@ -20,8 +21,7 @@ use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, BlockImportSource, ChainSpec, ColumnIndex, DataColumnSidecar, - DataColumnSidecarList, Epoch, EthSpec, Hash256, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, + DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, }; #[derive(Clone)] @@ -1261,15 +1261,14 @@ mod pending_components_tests { use crate::PayloadVerificationOutcome; use crate::block_verification_types::BlockImportData; use crate::test_utils::{NumBlobs, generate_rand_block_and_blobs, test_spec}; + use fixed_bytes::FixedBytesExtended; use fork_choice::PayloadVerificationStatus; use kzg::KzgCommitment; use rand::SeedableRng; use rand::rngs::StdRng; use state_processing::ConsensusContext; use types::test_utils::TestRandom; - use types::{ - BeaconState, FixedBytesExtended, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot, - }; + use types::{BeaconState, ForkName, MainnetEthSpec, SignedBeaconBlock, Slot}; type E = MainnetEthSpec; @@ -1284,7 +1283,7 @@ mod pending_components_tests { let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); let spec = test_spec::(); let (block, blobs_vec) = - 
generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng, &spec); + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); let max_len = spec.max_blobs_per_block(block.epoch()) as usize; let mut blobs: RuntimeFixedVector>>> = RuntimeFixedVector::default(max_len); diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index 61fc0677b15..b9986025667 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -4,7 +4,7 @@ use crate::block_verification::{ use crate::kzg_utils::{reconstruct_data_columns, validate_data_columns}; use crate::observed_data_sidecars::{ObservationStrategy, Observe}; use crate::{BeaconChain, BeaconChainError, BeaconChainTypes, metrics}; -use derivative::Derivative; +use educe::Educe; use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; use proto_array::Block; @@ -296,8 +296,8 @@ impl GossipVerifiedDataColumn } /// Wrapper over a `DataColumnSidecar` for which we have completed kzg verification. 
-#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] +#[derive(Debug, Educe, Clone, Encode, Decode)] +#[educe(PartialEq, Eq)] #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedDataColumn { data: Arc>, @@ -353,8 +353,8 @@ pub type CustodyDataColumnList = VariableList, ::NumberOfColumns>; /// Data column that we must custody -#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Educe, Clone, Encode, Decode)] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[ssz(struct_behaviour = "transparent")] pub struct CustodyDataColumn { data: Arc>, @@ -383,8 +383,8 @@ impl CustodyDataColumn { } /// Data column that we must custody and has completed kzg verification -#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] +#[derive(Debug, Educe, Clone, Encode, Decode)] +#[educe(PartialEq, Eq)] #[ssz(struct_behaviour = "transparent")] pub struct KzgVerifiedCustodyDataColumn { data: Arc>, @@ -850,22 +850,6 @@ mod test { .build(); harness.advance_slot(); - // Check block generator timestamp conversion sanity. 
- { - let exec_block_generator = harness.execution_block_generator(); - assert_eq!( - exec_block_generator - .timestamp_to_slot_post_capella(exec_block_generator.osaka_time.unwrap()), - 0 - ); - assert_eq!( - exec_block_generator.timestamp_to_slot_post_capella( - exec_block_generator.osaka_time.unwrap() + harness.spec.seconds_per_slot - ), - 1 - ); - } - let verify_fn = |column_sidecar: DataColumnSidecar| { GossipVerifiedDataColumn::<_>::new_for_block_publishing( column_sidecar.into(), @@ -884,16 +868,16 @@ mod test { let state = harness.get_current_state(); let ((block, _blobs_opt), _state) = harness .make_block_with_modifier(state, slot, |block| { - *block.body_mut().blob_kzg_commitments_mut().unwrap() = vec![].into(); + *block.body_mut().blob_kzg_commitments_mut().unwrap() = vec![].try_into().unwrap(); }) .await; let index = 0; let column_sidecar = DataColumnSidecar:: { index, - column: vec![].into(), - kzg_commitments: vec![].into(), - kzg_proofs: vec![].into(), + column: vec![].try_into().unwrap(), + kzg_commitments: vec![].try_into().unwrap(), + kzg_proofs: vec![].try_into().unwrap(), signed_block_header: block.signed_block_header(), kzg_commitments_inclusion_proof: block .message() @@ -930,7 +914,9 @@ mod test { let ((block, _blobs_opt), _state) = harness .make_block_with_modifier(state, slot, |block| { *block.body_mut().blob_kzg_commitments_mut().unwrap() = - vec![preloaded_commitments_single[0]; blob_count].into(); + vec![preloaded_commitments_single[0]; blob_count] + .try_into() + .unwrap(); }) .await; diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index d4eba2b0ea2..b021df2c33b 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -9,9 +9,11 @@ use crate::observed_aggregates::Error as ObservedAttestationsError; use crate::observed_attesters::Error as ObservedAttestersError; use crate::observed_block_producers::Error as ObservedBlockProducersError; use 
crate::observed_data_sidecars::Error as ObservedDataSidecarsError; +use bls::PublicKeyBytes; use execution_layer::PayloadStatus; use fork_choice::ExecutionStatus; use futures::channel::mpsc::TrySendError; +use milhouse::Error as MilhouseError; use operation_pool::OpPoolError; use safe_arith::ArithError; use ssz_types::Error as SszTypesError; @@ -28,7 +30,6 @@ use state_processing::{ }; use task_executor::ShutdownReason; use tokio::task::JoinError; -use types::milhouse::Error as MilhouseError; use types::*; macro_rules! easy_from_to { @@ -318,6 +319,9 @@ pub enum BlockProductionError { KzgError(kzg::Error), FailedToBuildBlobSidecars(String), MissingExecutionRequests, + SszTypesError(ssz_types::Error), + // TODO(gloas): Remove this once Gloas is implemented + GloasNotImplemented, } easy_from_to!(BlockProcessingError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs index e4855dd5598..cbe2f78fbda 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs/tests.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs/tests.rs @@ -576,7 +576,7 @@ fn create_test_block_and_blobs( .map(|(blob, proofs)| { BlobAndProof::V2(BlobAndProofV2 { blob, - proofs: proofs.to_vec().into(), + proofs: proofs.to_vec().try_into().unwrap(), }) }) .collect() diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index e8110d14cdc..85470715c9f 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -1,5 +1,6 @@ use crate::BeaconChain; use crate::BeaconChainTypes; +use eth2::types::GraffitiPolicy; use execution_layer::{CommitPrefix, ExecutionLayer, http::ENGINE_GET_CLIENT_VERSION_V1}; use logging::crit; use serde::{Deserialize, Serialize}; @@ -48,6 +49,25 @@ impl Debug for GraffitiOrigin { } } +pub enum GraffitiSettings { + Unspecified, + Specified { + graffiti: Graffiti, + policy: 
GraffitiPolicy, + }, +} + +impl GraffitiSettings { + pub fn new(validator_graffiti: Option, policy: Option) -> Self { + validator_graffiti + .map(|graffiti| Self::Specified { + graffiti, + policy: policy.unwrap_or(GraffitiPolicy::PreserveUserGraffiti), + }) + .unwrap_or(Self::Unspecified) + } +} + pub struct GraffitiCalculator { pub beacon_graffiti: GraffitiOrigin, execution_layer: Option>, @@ -73,11 +93,19 @@ impl GraffitiCalculator { /// 2. Graffiti specified by the user via beacon node CLI options. /// 3. The EL & CL client version string, applicable when the EL supports version specification. /// 4. The default lighthouse version string, used if the EL lacks version specification support. - pub async fn get_graffiti(&self, validator_graffiti: Option) -> Graffiti { - if let Some(graffiti) = validator_graffiti { - return graffiti; + pub async fn get_graffiti(&self, graffiti_settings: GraffitiSettings) -> Graffiti { + match graffiti_settings { + GraffitiSettings::Specified { graffiti, policy } => match policy { + GraffitiPolicy::PreserveUserGraffiti => graffiti, + GraffitiPolicy::AppendClientVersions => { + self.calculate_combined_graffiti(Some(graffiti)).await + } + }, + GraffitiSettings::Unspecified => self.calculate_combined_graffiti(None).await, } + } + async fn calculate_combined_graffiti(&self, validator_graffiti: Option) -> Graffiti { match self.beacon_graffiti { GraffitiOrigin::UserSpecified(graffiti) => graffiti, GraffitiOrigin::Calculated(default_graffiti) => { @@ -133,7 +161,7 @@ impl GraffitiCalculator { CommitPrefix("00000000".to_string()) }); - engine_version.calculate_graffiti(lighthouse_commit_prefix) + engine_version.calculate_graffiti(lighthouse_commit_prefix, validator_graffiti) } } } @@ -224,14 +252,17 @@ async fn engine_version_cache_refresh_service( #[cfg(test)] mod tests { use crate::ChainConfig; + use crate::graffiti_calculator::GraffitiSettings; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType, test_spec}; + use 
bls::Keypair; + use eth2::types::GraffitiPolicy; use execution_layer::EngineCapabilities; use execution_layer::test_utils::{DEFAULT_CLIENT_VERSION, DEFAULT_ENGINE_CAPABILITIES}; use std::sync::Arc; use std::sync::LazyLock; use std::time::Duration; use tracing::info; - use types::{ChainSpec, GRAFFITI_BYTES_LEN, Graffiti, Keypair, MinimalEthSpec}; + use types::{ChainSpec, GRAFFITI_BYTES_LEN, Graffiti, MinimalEthSpec}; const VALIDATOR_COUNT: usize = 48; /// A cached set of keys. @@ -280,8 +311,12 @@ mod tests { let version_bytes = std::cmp::min(lighthouse_version::VERSION.len(), GRAFFITI_BYTES_LEN); // grab the slice of the graffiti that corresponds to the lighthouse version - let graffiti_slice = - &harness.chain.graffiti_calculator.get_graffiti(None).await.0[..version_bytes]; + let graffiti_slice = &harness + .chain + .graffiti_calculator + .get_graffiti(GraffitiSettings::Unspecified) + .await + .0[..version_bytes]; // convert graffiti bytes slice to ascii for easy debugging if this test should fail let graffiti_str = @@ -302,7 +337,12 @@ mod tests { let spec = Arc::new(test_spec::()); let harness = get_harness(VALIDATOR_COUNT, spec, None); - let found_graffiti_bytes = harness.chain.graffiti_calculator.get_graffiti(None).await.0; + let found_graffiti_bytes = harness + .chain + .graffiti_calculator + .get_graffiti(GraffitiSettings::Unspecified) + .await + .0; let mock_commit = DEFAULT_CLIENT_VERSION.commit.clone(); let expected_graffiti_string = format!( @@ -351,7 +391,10 @@ mod tests { let found_graffiti = harness .chain .graffiti_calculator - .get_graffiti(Some(Graffiti::from(graffiti_bytes))) + .get_graffiti(GraffitiSettings::new( + Some(Graffiti::from(graffiti_bytes)), + Some(GraffitiPolicy::PreserveUserGraffiti), + )) .await; assert_eq!( @@ -359,4 +402,98 @@ mod tests { "0x6e6963652067726166666974692062726f000000000000000000000000000000" ); } + + #[tokio::test] + async fn check_append_el_version_graffiti_various_length() { + let spec = Arc::new(test_spec::()); + 
let harness = get_harness(VALIDATOR_COUNT, spec, None); + + let graffiti_vec = vec![ + // less than 20 characters, example below is 19 characters + "This is my graffiti", + // 20-23 characters, example below is 22 characters + "This is my graffiti yo", + // 24-27 characters, example below is 26 characters + "This is my graffiti string", + // 28-29 characters, example below is 29 characters + "This is my graffiti string yo", + // 30-32 characters, example below is 32 characters + "This is my graffiti string yo yo", + ]; + + for graffiti in graffiti_vec { + let mut graffiti_bytes = [0; GRAFFITI_BYTES_LEN]; + graffiti_bytes[..graffiti.len()].copy_from_slice(graffiti.as_bytes()); + + // To test appending client version info with user specified graffiti + let policy = GraffitiPolicy::AppendClientVersions; + let found_graffiti_bytes = harness + .chain + .graffiti_calculator + .get_graffiti(GraffitiSettings::Specified { + graffiti: Graffiti::from(graffiti_bytes), + policy, + }) + .await + .0; + + let mock_commit = DEFAULT_CLIENT_VERSION.commit.clone(); + + let graffiti_length = graffiti.len(); + let append_graffiti_string = match graffiti_length { + 0..=19 => format!( + "{}{}{}{}", + DEFAULT_CLIENT_VERSION.code, + mock_commit + .strip_prefix("0x") + .unwrap_or("&mock_commit") + .get(0..4) + .expect("should get first 2 bytes in hex"), + "LH", + lighthouse_version::COMMIT_PREFIX + .get(0..4) + .expect("should get first 2 bytes in hex") + ), + 20..=23 => format!( + "{}{}{}{}", + DEFAULT_CLIENT_VERSION.code, + mock_commit + .strip_prefix("0x") + .unwrap_or("&mock_commit") + .get(0..2) + .expect("should get first 2 bytes in hex"), + "LH", + lighthouse_version::COMMIT_PREFIX + .get(0..2) + .expect("should get first 2 bytes in hex") + ), + 24..=27 => format!("{}{}", DEFAULT_CLIENT_VERSION.code, "LH",), + 28..=29 => DEFAULT_CLIENT_VERSION.code.to_string(), + // when user graffiti length is 30-32 characters, append nothing + 30..=32 => String::new(), + _ => panic!( + "graffiti 
length should be less than or equal to GRAFFITI_BYTES_LEN (32 characters)" + ), + }; + + let expected_graffiti_string = if append_graffiti_string.is_empty() { + // for the case of empty append_graffiti_string, i.e., user-specified graffiti is 30-32 characters + graffiti.to_string() + } else { + // There is a space between the client version info and user graffiti + // as defined in calculate_graffiti function in engine_api.rs + format!("{} {}", append_graffiti_string, graffiti) + }; + + let expected_graffiti_prefix_bytes = expected_graffiti_string.as_bytes(); + let expected_graffiti_prefix_len = + std::cmp::min(expected_graffiti_prefix_bytes.len(), GRAFFITI_BYTES_LEN); + + let found_graffiti_string = + std::str::from_utf8(&found_graffiti_bytes[..expected_graffiti_prefix_len]) + .expect("bytes should convert nicely to ascii"); + + assert_eq!(expected_graffiti_string, found_graffiti_string); + } + } } diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index e4040eea6b0..91b0f12cbb3 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,5 +1,6 @@ use crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; use crate::{BeaconChain, BeaconChainTypes, WhenSlotSkipped, metrics}; +use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use state_processing::{ per_block_processing::ParallelSignatureSets, @@ -12,7 +13,7 @@ use store::metadata::DataColumnInfo; use store::{AnchorInfo, BlobInfo, DBColumn, Error as StoreError, KeyValueStore, KeyValueStoreOp}; use strum::IntoStaticStr; use tracing::{debug, instrument}; -use types::{FixedBytesExtended, Hash256, Slot}; +use types::{Hash256, Slot}; /// Use a longer timeout on the pubkey cache. 
/// diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 382775ab50f..334124419b9 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -258,7 +258,8 @@ pub(crate) fn build_data_column_sidecars( .get(col) .ok_or(format!("Missing blob cell at index {col}"))?; let cell: Vec = cell.to_vec(); - let cell = Cell::::from(cell); + let cell = + Cell::::try_from(cell).map_err(|e| format!("BytesPerCell exceeded: {e:?}"))?; let proof = blob_cell_proofs .get(col) @@ -276,23 +277,27 @@ pub(crate) fn build_data_column_sidecars( } } - let sidecars: Vec>> = columns + let sidecars: Result>>, String> = columns .into_iter() .zip(column_kzg_proofs) .enumerate() - .map(|(index, (col, proofs))| { - Arc::new(DataColumnSidecar { - index: index as u64, - column: DataColumn::::from(col), - kzg_commitments: kzg_commitments.clone(), - kzg_proofs: VariableList::from(proofs), - signed_block_header: signed_block_header.clone(), - kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), - }) - }) + .map( + |(index, (col, proofs))| -> Result>, String> { + Ok(Arc::new(DataColumnSidecar { + index: index as u64, + column: DataColumn::::try_from(col) + .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, + kzg_commitments: kzg_commitments.clone(), + kzg_proofs: VariableList::try_from(proofs) + .map_err(|e| format!("MaxBlobCommitmentsPerBlock exceeded: {e:?}"))?, + signed_block_header: signed_block_header.clone(), + kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), + })) + }, + ) .collect(); - Ok(sidecars) + sidecars } /// Reconstruct blobs from a subset of data column sidecars (requires at least 50%). 
@@ -303,12 +308,14 @@ pub(crate) fn build_data_column_sidecars( /// and it will be slow if the node needs to reconstruct the blobs pub fn reconstruct_blobs( kzg: &Kzg, - data_columns: &[Arc>], + mut data_columns: Vec>>, blob_indices_opt: Option>, signed_block: &SignedBlindedBeaconBlock, spec: &ChainSpec, ) -> Result, String> { - // The data columns are from the database, so we assume their correctness. + // Sort data columns by index to ensure ascending order for KZG operations + data_columns.sort_unstable_by_key(|dc| dc.index); + let first_data_column = data_columns .first() .ok_or("data_columns should have at least one element".to_string())?; @@ -326,7 +333,7 @@ pub fn reconstruct_blobs( .map(|row_index| { let mut cells: Vec = vec![]; let mut cell_ids: Vec = vec![]; - for data_column in data_columns { + for data_column in &data_columns { let cell = data_column .column .get(row_index) @@ -458,12 +465,13 @@ mod test { test_reconstruct_data_columns(&kzg, &spec); test_reconstruct_data_columns_unordered(&kzg, &spec); test_reconstruct_blobs_from_data_columns(&kzg, &spec); + test_reconstruct_blobs_from_data_columns_unordered(&kzg, &spec); test_validate_data_columns(&kzg, &spec); } #[track_caller] fn test_validate_data_columns(kzg: &Kzg, spec: &ChainSpec) { - let num_of_blobs = 6; + let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); @@ -489,7 +497,8 @@ mod test { #[track_caller] fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) { - let num_of_blobs = 6; + // Using at least 2 blobs to make sure we're arranging the data columns correctly. + let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); @@ -529,6 +538,7 @@ mod test { #[track_caller] fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { + // Using at least 2 blobs to make sure we're arranging the data columns correctly. 
let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); @@ -552,6 +562,7 @@ mod test { #[track_caller] fn test_reconstruct_data_columns_unordered(kzg: &Kzg, spec: &ChainSpec) { + // Using at least 2 blobs to make sure we're arranging the data columns correctly. let num_of_blobs = 2; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); @@ -573,7 +584,7 @@ mod test { #[track_caller] fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { - let num_of_blobs = 6; + let num_of_blobs = 3; let (signed_block, blobs, proofs) = create_test_fulu_block_and_blobs::(num_of_blobs, spec); let blob_refs = blobs.iter().collect::>(); @@ -583,10 +594,11 @@ mod test { // Now reconstruct let signed_blinded_block = signed_block.into(); - let blob_indices = vec![3, 4, 5]; + // Using at least 2 blobs to make sure we're arranging the data columns correctly. + let blob_indices = vec![1, 2]; let reconstructed_blobs = reconstruct_blobs( kzg, - &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + column_sidecars[0..column_sidecars.len() / 2].to_vec(), Some(blob_indices.clone()), &signed_blinded_block, spec, @@ -604,6 +616,31 @@ mod test { } } + #[track_caller] + fn test_reconstruct_blobs_from_data_columns_unordered(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 2; + let (signed_block, blobs, proofs) = + create_test_fulu_block_and_blobs::(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::>(); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_refs, proofs.to_vec(), &signed_block, kzg, spec) + .unwrap(); + + // Test reconstruction with columns in reverse order (non-ascending) + let mut subset_columns: Vec<_> = + column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2].to_vec(); + subset_columns.reverse(); // This would fail without proper sorting in reconstruct_blobs + + let signed_blinded_block = signed_block.into(); + let 
reconstructed_blobs = + reconstruct_blobs(kzg, subset_columns, None, &signed_blinded_block, spec).unwrap(); + + for (i, original_blob) in blobs.iter().enumerate() { + let reconstructed_blob = &reconstructed_blobs.get(i).unwrap().blob; + assert_eq!(reconstructed_blob, original_blob, "{i}"); + } + } + fn get_kzg() -> Kzg { Kzg::new_from_trusted_setup(&get_trusted_setup()).expect("should create kzg") } diff --git a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs index fe62b8ef903..2dc4de7d04b 100644 --- a/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_finality_update_verification.rs @@ -1,5 +1,5 @@ use crate::{BeaconChain, BeaconChainTypes}; -use derivative::Derivative; +use educe::Educe; use slot_clock::SlotClock; use std::time::Duration; use strum::AsRefStr; @@ -55,8 +55,8 @@ pub enum Error { } /// Wraps a `LightClientFinalityUpdate` that has been verified for propagation on the gossip network. 
-#[derive(Derivative)] -#[derivative(Clone(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Clone(bound(T: BeaconChainTypes)))] pub struct VerifiedLightClientFinalityUpdate { light_client_finality_update: LightClientFinalityUpdate, seen_timestamp: Duration, diff --git a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs index b59390ea0c4..4079a374f89 100644 --- a/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs +++ b/beacon_node/beacon_chain/src/light_client_optimistic_update_verification.rs @@ -1,5 +1,5 @@ use crate::{BeaconChain, BeaconChainTypes}; -use derivative::Derivative; +use educe::Educe; use eth2::types::Hash256; use slot_clock::SlotClock; use std::time::Duration; @@ -49,8 +49,8 @@ pub enum Error { } /// Wraps a `LightClientOptimisticUpdate` that has been verified for propagation on the gossip network. -#[derive(Derivative)] -#[derivative(Clone(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Clone(bound(T: BeaconChainTypes)))] pub struct VerifiedLightClientOptimisticUpdate { light_client_optimistic_update: LightClientOptimisticUpdate, pub parent_root: Hash256, diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 8f1da7b67b7..e6557c7a270 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1185,7 +1185,7 @@ pub static VALIDATOR_MONITOR_UNAGGREGATED_ATTESTATION_DELAY_SECONDS: LazyLock< > = LazyLock::new(|| { try_create_histogram_vec( "validator_monitor_unaggregated_attestation_delay_seconds", - "The delay between when the validator should send the attestation and when it was received.", + "The delay between when the validator sent the attestation and the start of the slot.", &["src", "validator"], ) }); diff --git a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs 
b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs index 4c4478d17e6..beefc2d678b 100644 --- a/beacon_node/beacon_chain/src/naive_aggregation_pool.rs +++ b/beacon_node/beacon_chain/src/naive_aggregation_pool.rs @@ -577,12 +577,11 @@ where #[cfg(test)] mod tests { use super::*; - use ssz_types::BitList; - use store::BitVector; + use fixed_bytes::FixedBytesExtended; + use ssz_types::{BitList, BitVector}; use tree_hash::TreeHash; use types::{ - Attestation, AttestationBase, AttestationElectra, FixedBytesExtended, Fork, Hash256, - SyncCommitteeMessage, + Attestation, AttestationBase, AttestationElectra, Fork, Hash256, SyncCommitteeMessage, test_utils::{generate_deterministic_keypair, test_random_instance}, }; diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index f6f62e1b73b..b2c5cb4b38a 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -473,7 +473,8 @@ where #[cfg(not(debug_assertions))] mod tests { use super::*; - use types::{AttestationBase, FixedBytesExtended, Hash256, test_utils::test_random_instance}; + use fixed_bytes::FixedBytesExtended; + use types::{AttestationBase, Hash256, test_utils::test_random_instance}; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index 34d68fe3ac0..d5433f49d1b 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -19,8 +19,9 @@ use bitvec::vec::BitVec; use std::collections::{HashMap, HashSet}; use std::hash::Hash; use std::marker::PhantomData; +use typenum::Unsigned; use types::slot_data::SlotData; -use types::{Epoch, EthSpec, Hash256, Slot, Unsigned}; +use types::{Epoch, EthSpec, Hash256, Slot}; /// The maximum capacity of the `AutoPruningEpochContainer`. 
/// @@ -619,7 +620,7 @@ impl SlotSubcommitteeIndex { #[cfg(test)] mod tests { use super::*; - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; type E = types::MainnetEthSpec; diff --git a/beacon_node/beacon_chain/src/observed_block_producers.rs b/beacon_node/beacon_chain/src/observed_block_producers.rs index 096c8bff77d..b740735ac41 100644 --- a/beacon_node/beacon_chain/src/observed_block_producers.rs +++ b/beacon_node/beacon_chain/src/observed_block_producers.rs @@ -4,7 +4,8 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot, Unsigned}; +use typenum::Unsigned; +use types::{BeaconBlockRef, Epoch, EthSpec, Hash256, Slot}; #[derive(Debug, PartialEq)] pub enum Error { diff --git a/beacon_node/beacon_chain/src/observed_operations.rs b/beacon_node/beacon_chain/src/observed_operations.rs index 49614c5b542..4ca5371242c 100644 --- a/beacon_node/beacon_chain/src/observed_operations.rs +++ b/beacon_node/beacon_chain/src/observed_operations.rs @@ -1,4 +1,4 @@ -use derivative::Derivative; +use educe::Educe; use smallvec::{SmallVec, smallvec}; use state_processing::{SigVerifiedOp, TransformPersist, VerifyOperation, VerifyOperationAt}; use std::collections::HashSet; @@ -14,8 +14,8 @@ pub const SMALL_VEC_SIZE: usize = 8; /// Stateful tracker for exit/slashing operations seen on the network. /// /// Implements the conditions for gossip verification of exits and slashings from the P2P spec. -#[derive(Debug, Derivative)] -#[derivative(Default(bound = "T: ObservableOperation, E: EthSpec"))] +#[derive(Debug, Educe)] +#[educe(Default(bound(T: ObservableOperation, E: EthSpec)))] pub struct ObservedOperations, E: EthSpec> { /// Indices of validators for whom we have already seen an instance of an operation `T`. 
/// @@ -26,7 +26,7 @@ pub struct ObservedOperations, E: EthSpec> { /// `attestation_1.attester_indices` and `attestation_2.attester_indices`. observed_validator_indices: HashSet, /// The name of the current fork. The default will be overwritten on first use. - #[derivative(Default(value = "ForkName::Base"))] + #[educe(Default(expression = ForkName::Base))] current_fork: ForkName, _phantom: PhantomData<(T, E)>, } diff --git a/beacon_node/beacon_chain/src/observed_slashable.rs b/beacon_node/beacon_chain/src/observed_slashable.rs index 001a0d4a867..704d605436b 100644 --- a/beacon_node/beacon_chain/src/observed_slashable.rs +++ b/beacon_node/beacon_chain/src/observed_slashable.rs @@ -5,7 +5,8 @@ use crate::observed_block_producers::Error; use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; -use types::{EthSpec, Hash256, Slot, Unsigned}; +use typenum::Unsigned; +use types::{EthSpec, Hash256, Slot}; #[derive(Eq, Hash, PartialEq, Debug, Default)] pub struct ProposalKey { diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs index e8bd526e19f..e238e1efb6c 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v23.rs @@ -122,7 +122,7 @@ pub fn downgrade_from_v23( let heads = fork_choice .proto_array() - .heads_descended_from_finalization::(); + .heads_descended_from_finalization::(fork_choice.finalized_checkpoint()); let head_roots = heads.iter().map(|node| node.root).collect(); let head_slots = heads.iter().map(|node| node.slot).collect(); diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 22921147a68..618d459754d 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -290,6 +290,7 @@ impl 
BlockShufflingIds { #[cfg(not(debug_assertions))] #[cfg(test)] mod test { + use fixed_bytes::FixedBytesExtended; use types::*; use crate::test_utils::EphemeralHarnessType; diff --git a/beacon_node/beacon_chain/src/single_attestation.rs b/beacon_node/beacon_chain/src/single_attestation.rs index 33a093687e5..955eb98e92a 100644 --- a/beacon_node/beacon_chain/src/single_attestation.rs +++ b/beacon_node/beacon_chain/src/single_attestation.rs @@ -1,7 +1,7 @@ use crate::attestation_verification::Error; +use ssz_types::{BitList, BitVector}; use types::{ - Attestation, AttestationBase, AttestationElectra, BitList, BitVector, EthSpec, ForkName, - SingleAttestation, + Attestation, AttestationBase, AttestationElectra, EthSpec, ForkName, SingleAttestation, }; pub fn single_attestation_to_attestation( diff --git a/beacon_node/beacon_chain/src/sync_committee_verification.rs b/beacon_node/beacon_chain/src/sync_committee_verification.rs index 41d29d5526e..e74e284e583 100644 --- a/beacon_node/beacon_chain/src/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/src/sync_committee_verification.rs @@ -30,8 +30,9 @@ use crate::observed_attesters::SlotSubcommitteeIndex; use crate::{ BeaconChain, BeaconChainError, BeaconChainTypes, metrics, observed_aggregates::ObserveOutcome, }; +use bls::AggregateSignature; use bls::{PublicKeyBytes, verify_signature_sets}; -use derivative::Derivative; +use educe::Educe; use safe_arith::ArithError; use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; @@ -49,9 +50,9 @@ use tree_hash_derive::TreeHash; use types::ChainSpec; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::slot_data::SlotData; -use types::sync_committee::Error as SyncCommitteeError; +use types::sync_committee::SyncCommitteeError; use types::{ - AggregateSignature, BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, + BeaconStateError, EthSpec, Hash256, SignedContributionAndProof, Slot, SyncCommitteeContribution, SyncCommitteeMessage, 
SyncSelectionProof, SyncSubnetId, sync_committee_contribution::Error as ContributionError, }; @@ -261,8 +262,8 @@ impl From for Error { } /// Wraps a `SignedContributionAndProof` that has been verified for propagation on the gossip network.\ -#[derive(Derivative)] -#[derivative(Clone(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Clone(bound(T: BeaconChainTypes)))] pub struct VerifiedSyncContribution { signed_aggregate: SignedContributionAndProof, participant_pubkeys: Vec, diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index b626fcd862c..6d17d6d85c5 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -2,6 +2,7 @@ use crate::blob_verification::GossipVerifiedBlob; use crate::block_verification_types::{AsBlock, RpcBlock}; use crate::custody_context::NodeCustodyType; use crate::data_column_verification::CustodyDataColumn; +use crate::graffiti_calculator::GraffitiSettings; use crate::kzg_utils::build_data_column_sidecars; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; @@ -20,7 +21,10 @@ pub use crate::{ validator_monitor::{ValidatorMonitor, ValidatorMonitorConfig}, }; use bls::get_withdrawal_credentials; -use eth2::types::SignedBlockContentsTuple; +use bls::{ + AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, +}; +use eth2::types::{GraffitiPolicy, SignedBlockContentsTuple}; use execution_layer::test_utils::generate_genesis_header; use execution_layer::{ ExecutionLayer, @@ -30,6 +34,7 @@ use execution_layer::{ MockExecutionLayer, }, }; +use fixed_bytes::FixedBytesExtended; use futures::channel::mpsc::Receiver; pub use genesis::{DEFAULT_ETH1_BLOCK_HASH, InteropGenesisBuilder}; use int_to_bytes::int_to_bytes32; @@ -46,6 +51,7 @@ use rand::seq::SliceRandom; use rayon::prelude::*; use sensitive_url::SensitiveUrl; use slot_clock::{SlotClock, 
TestingSlotClock}; +use ssz_types::{RuntimeVariableList, VariableList}; use state_processing::per_block_processing::compute_timestamp_at_slot; use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; @@ -60,12 +66,13 @@ use store::{HotColdDB, ItemStore, MemoryStore, config::StoreConfig}; use task_executor::TaskExecutor; use task_executor::{ShutdownReason, test_utils::TestRuntime}; use tree_hash::TreeHash; +use typenum::U4294967296; use types::data_column_custody_group::CustodyIndex; use types::indexed_attestation::IndexedAttestationBase; use types::payload::BlockProductionVersion; use types::test_utils::TestRandom; pub use types::test_utils::generate_deterministic_keypairs; -use types::{typenum::U4294967296, *}; +use types::*; // 4th September 2019 pub const HARNESS_GENESIS_TIME: u64 = 1_567_552_690; @@ -83,6 +90,10 @@ pub const TEST_DATA_COLUMN_SIDECARS_SSZ: &[u8] = // a different value. pub const DEFAULT_TARGET_AGGREGATORS: u64 = u64::MAX; +// Minimum and maximum number of blobs to generate in each slot when using the `NumBlobs::Random` option (default). +const DEFAULT_MIN_BLOBS: usize = 1; +const DEFAULT_MAX_BLOBS: usize = 2; + static KZG: LazyLock> = LazyLock::new(|| { let kzg = Kzg::new_from_trusted_setup(&get_trusted_setup()).expect("should create kzg"); Arc::new(kzg) @@ -174,23 +185,28 @@ fn make_rng() -> Mutex { Mutex::new(StdRng::seed_from_u64(0x0DDB1A5E5BAD5EEDu64)) } -/// Return a `ChainSpec` suitable for test usage. -/// -/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment -/// variable. Otherwise use the default spec. 
-pub fn test_spec() -> ChainSpec { - let mut spec = if cfg!(feature = "fork_from_env") { +pub fn fork_name_from_env() -> Option { + if cfg!(feature = "fork_from_env") { let fork_name = std::env::var(FORK_NAME_ENV_VAR).unwrap_or_else(|e| { panic!( "{} env var must be defined when using fork_from_env: {:?}", FORK_NAME_ENV_VAR, e ) }); - let fork = ForkName::from_str(fork_name.as_str()).unwrap(); - fork.make_genesis_spec(E::default_spec()) + Some(ForkName::from_str(fork_name.as_str()).unwrap()) } else { - E::default_spec() - }; + None + } +} + +/// Return a `ChainSpec` suitable for test usage. +/// +/// If the `fork_from_env` feature is enabled, read the fork to use from the FORK_NAME environment +/// variable. Otherwise use the default spec. +pub fn test_spec() -> ChainSpec { + let mut spec = fork_name_from_env() + .map(|fork| fork.make_genesis_spec(E::default_spec())) + .unwrap_or_else(|| E::default_spec()); // Set target aggregators to a high value by default. spec.target_aggregators_per_committee = DEFAULT_TARGET_AGGREGATORS; @@ -928,6 +944,8 @@ where // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().random::<[u8; 32]>()); + let graffiti_settings = + GraffitiSettings::new(Some(graffiti), Some(GraffitiPolicy::PreserveUserGraffiti)); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); @@ -941,7 +959,7 @@ where None, slot, randao_reveal, - Some(graffiti), + graffiti_settings, ProduceBlockVerification::VerifyRandao, builder_boost_factor, BlockProductionVersion::V3, @@ -985,6 +1003,8 @@ where // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. 
let graffiti = Graffiti::from(self.rng.lock().random::<[u8; 32]>()); + let graffiti_settings = + GraffitiSettings::new(Some(graffiti), Some(GraffitiPolicy::PreserveUserGraffiti)); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); @@ -995,7 +1015,7 @@ where None, slot, randao_reveal, - Some(graffiti), + graffiti_settings, ProduceBlockVerification::VerifyRandao, None, BlockProductionVersion::FullV2, @@ -1044,6 +1064,8 @@ where // BeaconChain errors out with `DuplicateFullyImported`. Vary the graffiti so that we produce // different blocks each time. let graffiti = Graffiti::from(self.rng.lock().random::<[u8; 32]>()); + let graffiti_settings = + GraffitiSettings::new(Some(graffiti), Some(GraffitiPolicy::PreserveUserGraffiti)); let randao_reveal = self.sign_randao_reveal(&state, proposer_index, slot); @@ -1056,7 +1078,7 @@ where None, slot, randao_reveal, - Some(graffiti), + graffiti_settings, ProduceBlockVerification::VerifyRandao, None, BlockProductionVersion::FullV2, @@ -2318,7 +2340,7 @@ where .collect::>(); // Building a VarList from leaves - let deposit_data_list = VariableList::<_, U4294967296>::from(leaves.clone()); + let deposit_data_list = VariableList::<_, U4294967296>::try_from(leaves.clone()).unwrap(); // Setting the deposit_root to be the tree_hash_root of the VarList state.eth1_data_mut().deposit_root = deposit_data_list.tree_hash_root(); @@ -2342,7 +2364,7 @@ where let deposits = datas .into_par_iter() .zip(proofs.into_par_iter()) - .map(|(data, proof)| (data, proof.into())) + .map(|(data, proof)| (data, proof.try_into().unwrap())) .map(|(data, proof)| Deposit { proof, data }) .collect::>(); @@ -2908,7 +2930,6 @@ where let chain_dump = self.chain.chain_dump().unwrap(); chain_dump .iter() - .cloned() .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root) .filter(|block_hash| *block_hash != Hash256::zero()) .map(|hash| hash.into()) @@ -3239,96 +3260,47 @@ pub enum NumBlobs { None, } +macro_rules! 
add_blob_transactions { + ($message:expr, $payload_type:ty, $num_blobs:expr, $rng:expr, $fork_name:expr) => {{ + let num_blobs = match $num_blobs { + NumBlobs::Random => $rng.random_range(DEFAULT_MIN_BLOBS..=DEFAULT_MAX_BLOBS), + NumBlobs::Number(n) => n, + NumBlobs::None => 0, + }; + let (bundle, transactions) = + execution_layer::test_utils::generate_blobs::(num_blobs, $fork_name).unwrap(); + + let payload: &mut $payload_type = &mut $message.body.execution_payload; + payload.execution_payload.transactions = <_>::default(); + for tx in Vec::from(transactions) { + payload.execution_payload.transactions.push(tx).unwrap(); + } + $message.body.blob_kzg_commitments = bundle.commitments.clone(); + bundle + }}; +} + pub fn generate_rand_block_and_blobs( fork_name: ForkName, num_blobs: NumBlobs, rng: &mut impl Rng, - spec: &ChainSpec, ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); - let mut block = SignedBeaconBlock::from_block(inner, types::Signature::random_for_test(rng)); - let max_blobs = spec.max_blobs_per_block(block.epoch()) as usize; + let mut block = SignedBeaconBlock::from_block(inner, Signature::random_for_test(rng)); let mut blob_sidecars = vec![]; let bundle = match block { SignedBeaconBlock::Deneb(SignedBeaconBlockDeneb { ref mut message, .. - }) => { - // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
- let payload: &mut FullPayloadDeneb = &mut message.body.execution_payload; - let num_blobs = match num_blobs { - NumBlobs::Random => rng.random_range(1..=max_blobs), - NumBlobs::Number(n) => n, - NumBlobs::None => 0, - }; - let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); - - payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { - payload.execution_payload.transactions.push(tx).unwrap(); - } - message.body.blob_kzg_commitments = bundle.commitments.clone(); - bundle - } + }) => add_blob_transactions!(message, FullPayloadDeneb, num_blobs, rng, fork_name), SignedBeaconBlock::Electra(SignedBeaconBlockElectra { ref mut message, .. - }) => { - // Get either zero blobs or a random number of blobs between 1 and Max Blobs. - let payload: &mut FullPayloadElectra = &mut message.body.execution_payload; - let num_blobs = match num_blobs { - NumBlobs::Random => rng.random_range(1..=max_blobs), - NumBlobs::Number(n) => n, - NumBlobs::None => 0, - }; - let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); - payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { - payload.execution_payload.transactions.push(tx).unwrap(); - } - message.body.blob_kzg_commitments = bundle.commitments.clone(); - bundle - } + }) => add_blob_transactions!(message, FullPayloadElectra, num_blobs, rng, fork_name), SignedBeaconBlock::Fulu(SignedBeaconBlockFulu { ref mut message, .. - }) => { - // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
- let payload: &mut FullPayloadFulu = &mut message.body.execution_payload; - let num_blobs = match num_blobs { - NumBlobs::Random => rng.random_range(1..=max_blobs), - NumBlobs::Number(n) => n, - NumBlobs::None => 0, - }; - let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); - payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { - payload.execution_payload.transactions.push(tx).unwrap(); - } - message.body.blob_kzg_commitments = bundle.commitments.clone(); - bundle - } - SignedBeaconBlock::Gloas(SignedBeaconBlockGloas { - ref mut message, .. - }) => { - // Get either zero blobs or a random number of blobs between 1 and Max Blobs. - let payload: &mut FullPayloadGloas = &mut message.body.execution_payload; - let num_blobs = match num_blobs { - NumBlobs::Random => rng.random_range(1..=max_blobs), - NumBlobs::Number(n) => n, - NumBlobs::None => 0, - }; - let (bundle, transactions) = - execution_layer::test_utils::generate_blobs::(num_blobs, fork_name).unwrap(); - payload.execution_payload.transactions = <_>::default(); - for tx in Vec::from(transactions) { - payload.execution_payload.transactions.push(tx).unwrap(); - } - message.body.blob_kzg_commitments = bundle.commitments.clone(); - bundle - } + }) => add_blob_transactions!(message, FullPayloadFulu, num_blobs, rng, fork_name), + // TODO(EIP-7732) Add `SignedBeaconBlock::Gloas` variant _ => return (block, blob_sidecars), }; @@ -3369,7 +3341,7 @@ pub fn generate_rand_block_and_data_columns( SignedBeaconBlock>, DataColumnSidecarList, ) { - let (block, _blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); + let (block, _blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); let data_columns = generate_data_column_sidecars_from_block(&block, spec); (block, data_columns) } diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 
00c30e5ab1d..2a76d65d328 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -4,6 +4,7 @@ use crate::beacon_proposer_cache::{BeaconProposerCache, TYPICAL_SLOTS_PER_EPOCH}; use crate::metrics; +use bls::PublicKeyBytes; use itertools::Itertools; use logging::crit; use parking_lot::{Mutex, RwLock}; @@ -28,9 +29,10 @@ use types::consts::altair::{ use types::{ Attestation, AttestationData, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, - IndexedAttestationRef, ProposerSlashing, PublicKeyBytes, SignedAggregateAndProof, - SignedContributionAndProof, Slot, SyncCommitteeMessage, VoluntaryExit, + IndexedAttestationRef, ProposerSlashing, SignedAggregateAndProof, SignedContributionAndProof, + Slot, SyncCommitteeMessage, VoluntaryExit, }; + /// Used for Prometheus labels. /// /// We've used `total` for this value to align with Nimbus, as per: @@ -1214,7 +1216,7 @@ impl ValidatorMonitor { let delay = get_message_delay_ms( seen_timestamp, data.slot, - slot_clock.unagg_attestation_production_delay(), + Duration::from_secs(0), slot_clock, ); diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs index 39d2c2c2d76..26ac02d91b4 100644 --- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs +++ b/beacon_node/beacon_chain/src/validator_pubkey_cache.rs @@ -1,13 +1,17 @@ use crate::errors::BeaconChainError; use crate::{BeaconChainTypes, BeaconStore}; use bls::PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN; +use bls::{PublicKey, PublicKeyBytes}; +use fixed_bytes::FixedBytesExtended; +use rayon::prelude::*; use smallvec::SmallVec; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::collections::HashMap; use std::marker::PhantomData; use store::{DBColumn, Error as StoreError, StoreItem, StoreOp}; -use types::{BeaconState, FixedBytesExtended, Hash256, PublicKey, 
PublicKeyBytes}; +use tracing::instrument; +use types::{BeaconState, Hash256}; /// Provides a mapping of `validator_index -> validator_publickey`. /// @@ -28,6 +32,7 @@ impl ValidatorPubkeyCache { /// Create a new public key cache using the keys in `state.validators`. /// /// The new cache will be updated with the keys from `state` and immediately written to disk. + #[instrument(name = "validator_pubkey_cache_new", skip_all)] pub fn new( state: &BeaconState, store: BeaconStore, @@ -46,6 +51,7 @@ impl ValidatorPubkeyCache { } /// Load the pubkey cache from the given on-disk database. + #[instrument(name = "validator_pubkey_cache_load_from_store", skip_all)] pub fn load_from_store(store: BeaconStore) -> Result { let mut pubkeys = vec![]; let mut indices = HashMap::new(); @@ -77,6 +83,7 @@ impl ValidatorPubkeyCache { /// Does not delete any keys from `self` if they don't appear in `state`. /// /// NOTE: The caller *must* commit the returned I/O batch as part of the block import process. + #[instrument(skip_all)] pub fn import_new_pubkeys( &mut self, state: &BeaconState, @@ -106,29 +113,58 @@ impl ValidatorPubkeyCache { self.indices.reserve(validator_keys.len()); let mut store_ops = Vec::with_capacity(validator_keys.len()); - for pubkey_bytes in validator_keys { - let i = self.pubkeys.len(); - if self.indices.contains_key(&pubkey_bytes) { - return Err(BeaconChainError::DuplicateValidatorPublicKey); + let is_initial_import = self.pubkeys.is_empty(); + + // Helper to insert a decompressed key + let mut insert_key = + |pubkey_bytes: PublicKeyBytes, pubkey: PublicKey| -> Result<(), BeaconChainError> { + let i = self.pubkeys.len(); + + if self.indices.contains_key(&pubkey_bytes) { + return Err(BeaconChainError::DuplicateValidatorPublicKey); + } + + // Stage the new validator key for writing to disk. + // It will be committed atomically when the block that introduced it is written to disk. + // Notably it is NOT written while the write lock on the cache is held. 
+ // See: https://github.com/sigp/lighthouse/issues/2327 + store_ops.push(StoreOp::KeyValueOp( + DatabasePubkey::from_pubkey(&pubkey) + .as_kv_store_op(DatabasePubkey::key_for_index(i)), + )); + + self.pubkeys.push(pubkey); + self.pubkey_bytes.push(pubkey_bytes); + self.indices.insert(pubkey_bytes, i); + Ok(()) + }; + + if is_initial_import { + // On first startup, decompress keys in parallel for better performance + let validator_keys_vec: Vec = validator_keys.collect(); + + let decompressed: Vec<(PublicKeyBytes, PublicKey)> = validator_keys_vec + .into_par_iter() + .map(|pubkey_bytes| { + let pubkey = (&pubkey_bytes) + .try_into() + .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?; + Ok((pubkey_bytes, pubkey)) + }) + .collect::, BeaconChainError>>()?; + + for (pubkey_bytes, pubkey) in decompressed { + insert_key(pubkey_bytes, pubkey)?; + } + } else { + // Sequential path for incremental updates + for pubkey_bytes in validator_keys { + let pubkey = (&pubkey_bytes) + .try_into() + .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?; + insert_key(pubkey_bytes, pubkey)?; } - - let pubkey = (&pubkey_bytes) - .try_into() - .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?; - - // Stage the new validator key for writing to disk. - // It will be committed atomically when the block that introduced it is written to disk. - // Notably it is NOT written while the write lock on the cache is held. 
- // See: https://github.com/sigp/lighthouse/issues/2327 - store_ops.push(StoreOp::KeyValueOp( - DatabasePubkey::from_pubkey(&pubkey) - .as_kv_store_op(DatabasePubkey::key_for_index(i)), - )); - - self.pubkeys.push(pubkey); - self.pubkey_bytes.push(pubkey_bytes); - self.indices.insert(pubkey_bytes, i); } Ok(store_ops) @@ -210,10 +246,11 @@ impl DatabasePubkey { mod test { use super::*; use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use bls::Keypair; use logging::create_test_tracing_subscriber; use std::sync::Arc; use store::HotColdDB; - use types::{EthSpec, Keypair, MainnetEthSpec}; + use types::{EthSpec, MainnetEthSpec}; type E = MainnetEthSpec; type T = EphemeralHarnessType; @@ -324,4 +361,39 @@ mod test { let cache = ValidatorPubkeyCache::load_from_store(store).expect("should open cache"); check_cache_get(&cache, &keypairs[..]); } + + #[test] + fn parallel_import_maintains_order() { + // Test that parallel decompression on first startup maintains correct order and indices + let (state, keypairs) = get_state(100); + let store = get_store(); + + // Create cache from empty state (triggers parallel path) + let cache: ValidatorPubkeyCache = + ValidatorPubkeyCache::new(&state, store).expect("should create cache"); + + check_cache_get(&cache, &keypairs[..]); + } + + #[test] + fn incremental_import_maintains_order() { + // Test that incremental imports maintain correct order (triggers sequential path) + let store = get_store(); + + // Start with 50 validators + let (state1, keypairs1) = get_state(50); + let mut cache = + ValidatorPubkeyCache::new(&state1, store.clone()).expect("should create cache"); + check_cache_get(&cache, &keypairs1[..]); + + // Add 50 more validators + let (state2, keypairs2) = get_state(100); + let ops = cache + .import_new_pubkeys(&state2) + .expect("should import pubkeys"); + store.do_atomically_with_block_and_blobs_cache(ops).unwrap(); + + // Verify all 100 validators are correctly indexed + check_cache_get(&cache, 
&keypairs2[..]); + } } diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 0acb23d5126..017c249d10b 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -4,11 +4,10 @@ use beacon_chain::attestation_simulator::produce_unaggregated_attestation; use beacon_chain::test_utils::{AttestationStrategy, BeaconChainHarness, BlockStrategy}; use beacon_chain::validator_monitor::UNAGGREGATED_ATTESTATION_LAG_SLOTS; use beacon_chain::{StateSkipConfig, WhenSlotSkipped, metrics}; +use bls::{AggregateSignature, Keypair}; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; -use types::{ - AggregateSignature, Attestation, EthSpec, Keypair, MainnetEthSpec, RelativeEpoch, Slot, -}; +use types::{Attestation, EthSpec, MainnetEthSpec, RelativeEpoch, Slot}; pub const VALIDATOR_COUNT: usize = 16; diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 706ffad3c1a..7984ea47081 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -13,15 +13,17 @@ use beacon_chain::{ single_attestation_to_attestation, test_spec, }, }; +use bls::{AggregateSignature, Keypair, SecretKey}; +use fixed_bytes::FixedBytesExtended; use genesis::{DEFAULT_ETH1_BLOCK_HASH, interop_genesis_state}; use int_to_bytes::int_to_bytes32; use state_processing::per_slot_processing; use std::sync::{Arc, LazyLock}; use tree_hash::TreeHash; +use typenum::Unsigned; use types::{ - Address, AggregateSignature, Attestation, AttestationRef, ChainSpec, Epoch, EthSpec, - FixedBytesExtended, ForkName, Hash256, Keypair, MainnetEthSpec, SecretKey, SelectionProof, - SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, Unsigned, + Address, Attestation, AttestationRef, ChainSpec, Epoch, EthSpec, ForkName, 
Hash256, + MainnetEthSpec, SelectionProof, SignedAggregateAndProof, SingleAttestation, Slot, SubnetId, signed_aggregate_and_proof::SignedAggregateAndProofRefMut, test_utils::generate_deterministic_keypair, }; diff --git a/beacon_node/beacon_chain/tests/blob_verification.rs b/beacon_node/beacon_chain/tests/blob_verification.rs index c42a2828c01..d1a0d87adf1 100644 --- a/beacon_node/beacon_chain/tests/blob_verification.rs +++ b/beacon_node/beacon_chain/tests/blob_verification.rs @@ -7,6 +7,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, block_verification_types::AsBlock, }; +use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; use std::sync::{Arc, LazyLock}; use types::{blob_sidecar::FixedBlobSidecarList, *}; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 7dfef50ea11..2644b74b28e 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -13,6 +13,8 @@ use beacon_chain::{ BeaconSnapshot, BlockError, ChainConfig, ChainSegmentResult, IntoExecutionPendingBlock, InvalidSignature, NotifyExecutionLayer, }; +use bls::{AggregateSignature, Keypair, Signature}; +use fixed_bytes::FixedBytesExtended; use logging::create_test_tracing_subscriber; use slasher::{Config as SlasherConfig, Slasher}; use state_processing::{ @@ -297,19 +299,20 @@ async fn chain_segment_full_segment() { #[tokio::test] async fn chain_segment_varying_chunk_size() { - for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { + let (chain_segment, chain_segment_blobs) = get_chain_segment().await; + let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) + .into_iter() + .collect(); + + for chunk_size in &[1, 2, 31, 32, 33] { let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); - let (chain_segment, chain_segment_blobs) = 
get_chain_segment().await; - let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) - .into_iter() - .collect(); harness .chain .slot_clock .set_slot(blocks.last().unwrap().slot().as_u64()); - for chunk in blocks.chunks(*chunk_size) { + for chunk in blocks.clone().chunks(*chunk_size) { harness .chain .process_chain_segment(chunk.to_vec(), NotifyExecutionLayer::Yes) @@ -707,7 +710,7 @@ async fn invalid_signature_attester_slashing() { let attester_slashing = if fork_name.electra_enabled() { let indexed_attestation = IndexedAttestationElectra { - attesting_indices: vec![0].into(), + attesting_indices: vec![0].try_into().unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, @@ -731,7 +734,7 @@ async fn invalid_signature_attester_slashing() { AttesterSlashing::Electra(attester_slashing) } else { let indexed_attestation = IndexedAttestationBase { - attesting_indices: vec![0].into(), + attesting_indices: vec![0].try_into().unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, @@ -898,7 +901,9 @@ async fn invalid_signature_deposit() { let harness = get_invalid_sigs_harness(&chain_segment).await; let mut snapshots = chain_segment.clone(); let deposit = Deposit { - proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1].into(), + proof: vec![Hash256::zero(); DEPOSIT_TREE_DEPTH + 1] + .try_into() + .unwrap(), data: DepositData { pubkey: Keypair::random().pk.into(), withdrawal_credentials: Hash256::zero(), @@ -1270,7 +1275,9 @@ async fn block_gossip_verification() { as usize; if let Ok(kzg_commitments) = block.body_mut().blob_kzg_commitments_mut() { - *kzg_commitments = vec![KzgCommitment::empty_for_testing(); kzg_commitments_len + 1].into(); + *kzg_commitments = vec![KzgCommitment::empty_for_testing(); kzg_commitments_len + 1] + .try_into() + .unwrap(); assert!( matches!( unwrap_err(harness.chain.verify_block_for_gossip(Arc::new(SignedBeaconBlock::from_block(block, signature))).await), diff --git 
a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 229ae1e1998..be9b3b2fa12 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -9,6 +9,7 @@ use beacon_chain::{ AvailabilityProcessingStatus, BlockError, ChainConfig, InvalidSignature, NotifyExecutionLayer, block_verification_types::AsBlock, }; +use bls::{Keypair, Signature}; use logging::create_test_tracing_subscriber; use std::sync::{Arc, LazyLock}; use types::*; diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index 466058eea38..86bdb03dafd 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -1,20 +1,26 @@ use beacon_chain::blob_verification::GossipVerifiedBlob; use beacon_chain::data_column_verification::GossipVerifiedDataColumn; -use beacon_chain::test_utils::{BeaconChainHarness, generate_data_column_sidecars_from_block}; +use beacon_chain::test_utils::{ + BeaconChainHarness, fork_name_from_env, generate_data_column_sidecars_from_block, test_spec, +}; use eth2::types::{EventKind, SseBlobSidecar, SseDataColumnSidecar}; use rand::SeedableRng; use rand::rngs::StdRng; use std::sync::Arc; use types::blob_sidecar::FixedBlobSidecarList; use types::test_utils::TestRandom; -use types::{BlobSidecar, DataColumnSidecar, EthSpec, ForkName, MinimalEthSpec, Slot}; +use types::{BlobSidecar, DataColumnSidecar, EthSpec, MinimalEthSpec, Slot}; type E = MinimalEthSpec; /// Verifies that a blob event is emitted when a gossip verified blob is received via gossip or the publish block API. 
#[tokio::test] async fn blob_sidecar_event_on_process_gossip_blob() { - let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); + if fork_name_from_env().is_some_and(|f| !f.deneb_enabled() || f.fulu_enabled()) { + return; + }; + + let spec = Arc::new(test_spec::()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -48,7 +54,11 @@ async fn blob_sidecar_event_on_process_gossip_blob() { /// Verifies that a data column event is emitted when a gossip verified data column is received via gossip or the publish block API. #[tokio::test] async fn data_column_sidecar_event_on_process_gossip_data_column() { - let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); + if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { + return; + }; + + let spec = Arc::new(test_spec::()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -93,7 +103,11 @@ async fn data_column_sidecar_event_on_process_gossip_data_column() { /// Verifies that a blob event is emitted when blobs are received via RPC. 
#[tokio::test] async fn blob_sidecar_event_on_process_rpc_blobs() { - let spec = Arc::new(ForkName::Deneb.make_genesis_spec(E::default_spec())); + if fork_name_from_env().is_some_and(|f| !f.deneb_enabled() || f.fulu_enabled()) { + return; + }; + + let spec = Arc::new(test_spec::()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec) .deterministic_keypairs(8) @@ -112,7 +126,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let slot = head_state.slot() + 1; let ((signed_block, opt_blobs), _) = harness.make_block(head_state, slot).await; let (kzg_proofs, blobs) = opt_blobs.unwrap(); - assert!(blobs.len() > 2); + assert_eq!(blobs.len(), 2); let blob_1 = Arc::new(BlobSidecar::new(0, blobs[0].clone(), &signed_block, kzg_proofs[0]).unwrap()); @@ -144,7 +158,11 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { #[tokio::test] async fn data_column_sidecar_event_on_process_rpc_columns() { - let spec = Arc::new(ForkName::Fulu.make_genesis_spec(E::default_spec())); + if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { + return; + }; + + let spec = Arc::new(test_spec::()); let harness = BeaconChainHarness::builder(E::default()) .spec(spec.clone()) .deterministic_keypairs(8) diff --git a/beacon_node/beacon_chain/tests/op_verification.rs b/beacon_node/beacon_chain/tests/op_verification.rs index c18af0bde70..2f97f10745e 100644 --- a/beacon_node/beacon_chain/tests/op_verification.rs +++ b/beacon_node/beacon_chain/tests/op_verification.rs @@ -9,6 +9,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, test_spec, }, }; +use bls::Keypair; use state_processing::per_block_processing::errors::{ AttesterSlashingInvalid, BlockOperationError, ExitInvalid, ProposerSlashingInvalid, }; diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index 0a5881e486b..ee9cf511ea5 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ 
b/beacon_node/beacon_chain/tests/rewards.rs @@ -7,8 +7,9 @@ use beacon_chain::test_utils::{ use beacon_chain::{ BlockError, ChainConfig, StateSkipConfig, WhenSlotSkipped, test_utils::{AttestationStrategy, BlockStrategy, RelativeSyncCommittee}, - types::{Epoch, EthSpec, Keypair, MinimalEthSpec}, + types::{Epoch, EthSpec, MinimalEthSpec}, }; +use bls::Keypair; use eth2::types::{StandardAttestationRewards, TotalAttestationRewards, ValidatorId}; use state_processing::{BlockReplayError, BlockReplayer}; use std::array::IntoIter; diff --git a/beacon_node/beacon_chain/tests/schema_stability.rs b/beacon_node/beacon_chain/tests/schema_stability.rs index 3b09921c15c..db7f7dbdbbd 100644 --- a/beacon_node/beacon_chain/tests/schema_stability.rs +++ b/beacon_node/beacon_chain/tests/schema_stability.rs @@ -4,6 +4,7 @@ use beacon_chain::{ persisted_custody::PersistedCustody, test_utils::{BeaconChainHarness, DiskHarnessType, test_spec}, }; +use bls::Keypair; use logging::create_test_tracing_subscriber; use operation_pool::PersistedOperationPool; use ssz::Encode; @@ -16,7 +17,7 @@ use store::{ }; use strum::IntoEnumIterator; use tempfile::{TempDir, tempdir}; -use types::{ChainSpec, Hash256, Keypair, MainnetEthSpec, Slot}; +use types::{ChainSpec, Hash256, MainnetEthSpec, Slot}; type E = MainnetEthSpec; type Store = Arc, BeaconNodeBackend>>; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 806c9dce7c1..ba0621ae720 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -11,7 +11,9 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, get_kzg, mock_execution_layer_from_parts, test_spec, }; -use beacon_chain::test_utils::{SyncCommitteeStrategy, generate_data_column_indices_rand_order}; +use beacon_chain::test_utils::{ + SyncCommitteeStrategy, fork_name_from_env, generate_data_column_indices_rand_order, +}; use 
beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, @@ -23,11 +25,14 @@ use beacon_chain::{ historical_blocks::HistoricalBlockError, migrate::MigratorConfig, }; +use bls::{Keypair, Signature, SignatureBytes}; +use fixed_bytes::FixedBytesExtended; use logging::create_test_tracing_subscriber; use maplit::hashset; use rand::Rng; use rand::rngs::StdRng; use slot_clock::{SlotClock, TestingSlotClock}; +use ssz_types::VariableList; use state_processing::{BlockReplayer, state_advance::complete_state_advance}; use std::collections::HashMap; use std::collections::HashSet; @@ -154,6 +159,7 @@ fn get_states_descendant_of_block( .collect() } +// TODO(EIP-7732) Extend to support gloas #[tokio::test] async fn light_client_bootstrap_test() { let spec = test_spec::(); @@ -201,7 +207,6 @@ async fn light_client_bootstrap_test() { LightClientBootstrap::Deneb(lc_bootstrap) => lc_bootstrap.header.beacon.slot, LightClientBootstrap::Electra(lc_bootstrap) => lc_bootstrap.header.beacon.slot, LightClientBootstrap::Fulu(lc_bootstrap) => lc_bootstrap.header.beacon.slot, - LightClientBootstrap::Gloas(lc_bootstrap) => lc_bootstrap.header.beacon.slot, }; assert_eq!( @@ -1415,7 +1420,7 @@ async fn proposer_shuffling_changing_with_lookahead() { let execution_requests = ExecutionRequests:: { deposits: VariableList::new(vec![deposit_request]).unwrap(), - withdrawals: vec![].into(), + withdrawals: vec![].try_into().unwrap(), consolidations: VariableList::new(vec![consolidation_request]).unwrap(), }; @@ -1576,6 +1581,10 @@ async fn proposer_duties_from_head_fulu() { } /// Test that we can compute the proposer shuffling for the Gloas fork epoch itself using lookahead! 
+// TODO(EIP-7732): Extend to gloas +// `state.latest_execution_payload_header()` not available in Gloas +// called from `add_block_at_slot` -> `make_block` -> `produce_block_on_state` -> `produce_partial_beacon_block` -> `get_execution_payload` -> `Error` +#[ignore] #[tokio::test] async fn proposer_lookahead_gloas_fork_epoch() { let gloas_fork_epoch = Epoch::new(4); @@ -2703,7 +2712,7 @@ async fn weak_subjectivity_sync_easy() { let num_initial_slots = E::slots_per_epoch() * 11; let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); let slots = (1..num_initial_slots).map(Slot::new).collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await } #[tokio::test] @@ -2711,7 +2720,7 @@ async fn weak_subjectivity_sync_single_block_batches() { let num_initial_slots = E::slots_per_epoch() * 11; let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); let slots = (1..num_initial_slots).map(Slot::new).collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot, Some(1)).await + weak_subjectivity_sync_test(slots, checkpoint_slot, Some(1), true).await } #[tokio::test] @@ -2725,7 +2734,7 @@ async fn weak_subjectivity_sync_unaligned_advanced_checkpoint() { slot <= checkpoint_slot - 3 || slot > checkpoint_slot }) .collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await } #[tokio::test] @@ -2739,7 +2748,7 @@ async fn weak_subjectivity_sync_unaligned_unadvanced_checkpoint() { slot <= checkpoint_slot || slot > checkpoint_slot + 3 }) .collect(); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await } // Regression test for https://github.com/sigp/lighthouse/issues/4817 @@ -2751,7 +2760,7 @@ async fn weak_subjectivity_sync_skips_at_genesis() { let end_slot = E::slots_per_epoch() * 4; let slots = 
(start_slot..end_slot).map(Slot::new).collect(); let checkpoint_slot = Slot::new(E::slots_per_epoch() * 2); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await } // Checkpoint sync from the genesis state. @@ -2764,13 +2773,176 @@ async fn weak_subjectivity_sync_from_genesis() { let end_slot = E::slots_per_epoch() * 2; let slots = (start_slot..end_slot).map(Slot::new).collect(); let checkpoint_slot = Slot::new(0); - weak_subjectivity_sync_test(slots, checkpoint_slot, None).await + weak_subjectivity_sync_test(slots, checkpoint_slot, None, true).await +} + +// Test checkpoint sync without providing blobs - backfill should fetch them. +#[tokio::test] +async fn weak_subjectivity_sync_without_blobs() { + let start_slot = 4; + let end_slot = E::slots_per_epoch() * 4; + let slots = (start_slot..end_slot).map(Slot::new).collect(); + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 2); + weak_subjectivity_sync_test(slots, checkpoint_slot, None, false).await +} + +// Ensures that an unaligned checkpoint sync (the block is older than the state) +// works correctly even when `prune_payloads` is enabled. +// +// Previously, the `HotColdDB` would refuse to load the execution payload for the +// anchor block because it was considered "pruned", causing the node to fail startup. +#[tokio::test] +async fn reproduction_unaligned_checkpoint_sync_pruned_payload() { + let spec = test_spec::(); + + // Requires Execution Payloads. + let Some(_) = spec.deneb_fork_epoch else { + return; + }; + + // Create an unaligned checkpoint with a gap of 3 slots. 
+ let num_initial_slots = E::slots_per_epoch() * 11; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9 - 3); + + let slots = (1..num_initial_slots) + .map(Slot::new) + .filter(|&slot| slot <= checkpoint_slot || slot > checkpoint_slot + 3) + .collect::>(); + + let temp1 = tempdir().unwrap(); + let full_store = get_store_generic(&temp1, StoreConfig::default(), spec.clone()); + + let harness = get_harness_import_all_data_columns(full_store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + // Extract snapshot data from the harness. + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + + let wss_block = harness + .chain + .store + .get_full_block(&wss_block_root) + .unwrap() + .unwrap(); + + // The test premise requires the anchor block to have a payload. + assert!(wss_block.message().execution_payload().is_ok()); + + let wss_blobs_opt = harness + .chain + .get_or_reconstruct_blobs(&wss_block_root) + .unwrap(); + + let wss_state = full_store + .get_state(&wss_state_root, Some(checkpoint_slot), CACHE_STATE_IN_TESTS) + .unwrap() + .unwrap(); + + // Configure the client with `prune_payloads = true`. + // This triggers the path where `try_get_full_block` must explicitly handle the anchor block. 
+ let temp2 = tempdir().unwrap(); + let store_config = StoreConfig { + prune_payloads: true, + ..StoreConfig::default() + }; + + let store = get_store_generic(&temp2, store_config, spec.clone()); + + let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(spec.seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + + let chain_config = ChainConfig { + reconstruct_historic_states: true, + ..ChainConfig::default() + }; + + let trusted_setup = get_kzg(&spec); + let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); + let mock = mock_execution_layer_from_parts( + harness.spec.clone(), + harness.runtime.task_executor.clone(), + ); + let all_custody_columns = (0..spec.number_of_custody_groups).collect::>(); + + // Attempt to build the BeaconChain. + // If the bug is present, this will panic with `MissingFullBlockExecutionPayloadPruned`. + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec, trusted_setup) + .chain_config(chain_config) + .store(store.clone()) + .custom_spec(spec.clone().into()) + .task_executor(harness.chain.task_executor.clone()) + .weak_subjectivity_state( + wss_state, + wss_block.clone(), + wss_blobs_opt.clone(), + genesis_state, + ) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .event_handler(Some(ServerSentEventHandler::new_with_capacity(1))) + .execution_layer(Some(mock.el)) + .ordered_custody_column_indices(all_custody_columns) + .rng(Box::new(StdRng::seed_from_u64(42))) + .build(); + + assert!( + beacon_chain.is_ok(), + "Beacon Chain failed to build. The anchor payload may have been incorrectly pruned. 
Error: {:?}", + beacon_chain.err() + ); + + let chain = beacon_chain.as_ref().unwrap(); + let wss_block_slot = wss_block.slot(); + + assert_ne!( + wss_block_slot, + chain.head_snapshot().beacon_state.slot(), + "Test invalid: Checkpoint was aligned (Slot {} == Slot {}). The test did not trigger the unaligned edge case.", + wss_block_slot, + chain.head_snapshot().beacon_state.slot() + ); + + let payload_exists = chain + .store + .execution_payload_exists(&wss_block_root) + .unwrap_or(false); + + assert!( + payload_exists, + "Split block payload must exist in the new node's store after checkpoint sync" + ); } async fn weak_subjectivity_sync_test( slots: Vec, checkpoint_slot: Slot, backfill_batch_size: Option, + provide_blobs: bool, ) { // Build an initial chain on one harness, representing a synced node with full history. let num_final_blocks = E::slots_per_epoch() * 2; @@ -2872,7 +3044,11 @@ async fn weak_subjectivity_sync_test( .weak_subjectivity_state( wss_state, wss_block.clone(), - wss_blobs_opt.clone(), + if provide_blobs { + wss_blobs_opt.clone() + } else { + None + }, genesis_state, ) .unwrap() @@ -3148,6 +3324,10 @@ async fn weak_subjectivity_sync_test( .get_state(&state_root, Some(slot), CACHE_STATE_IN_TESTS) .unwrap() .unwrap(); + assert_eq!( + state_root, + beacon_chain.state_root_at_slot(slot).unwrap().unwrap() + ); assert_eq!(state.slot(), slot); assert_eq!(state.canonical_root().unwrap(), state_root); } @@ -3211,12 +3391,13 @@ async fn test_import_historical_data_columns_batch() { for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()); - for data_column in data_columns.unwrap() { + for data_column in data_columns.unwrap_or_default() { data_columns_list.push(data_column); } } + assert!(!data_columns_list.is_empty()); + harness .extend_chain( (E::slots_per_epoch() * 4) as usize, @@ -3260,8 +3441,18 @@ async fn 
test_import_historical_data_columns_batch() { // Assert that data columns now exist for epoch 0 for block in block_root_iter { let (block_root, _) = block.unwrap(); - let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()) + if !harness + .get_block(block_root.into()) + .unwrap() + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .is_empty() + { + let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + assert!(data_columns.is_some()) + }; } } @@ -3299,9 +3490,8 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { for block in block_root_iter { let (block_root, _) = block.unwrap(); let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); - assert!(data_columns.is_some()); - for data_column in data_columns.unwrap() { + for data_column in data_columns.unwrap_or_default() { let mut data_column = (*data_column).clone(); if data_column.index % 2 == 0 { data_column.signed_block_header.message.body_root = Hash256::ZERO; @@ -3310,6 +3500,7 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { data_columns_list.push(Arc::new(data_column)); } } + assert!(!data_columns_list.is_empty()); harness .extend_chain( @@ -3356,6 +3547,90 @@ async fn test_import_historical_data_columns_batch_mismatched_block_root() { )); } +// This should verify that a data column sidecar associated to a block root that doesn't exist in the store cannot +// be imported. 
+#[tokio::test] +async fn test_import_historical_data_columns_batch_no_block_found() { + if fork_name_from_env().is_some_and(|f| !f.fulu_enabled()) { + return; + }; + + let spec = test_spec::(); + let db_path = tempdir().unwrap(); + let store = get_store_generic(&db_path, StoreConfig::default(), spec); + let start_slot = Slot::new(1); + let end_slot = Slot::new(E::slots_per_epoch() * 2 - 1); + let cgc = 128; + + let harness = get_harness_import_all_data_columns(store.clone(), LOW_VALIDATOR_COUNT); + + harness + .extend_chain( + (E::slots_per_epoch() * 2) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + harness.advance_slot(); + + let block_root_iter = harness + .chain + .forwards_iter_block_roots_until(start_slot, end_slot) + .unwrap(); + + let mut data_columns_list = vec![]; + + for block in block_root_iter { + let (block_root, _) = block.unwrap(); + let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + + for data_column in data_columns.unwrap_or_default() { + let mut data_column = (*data_column).clone(); + data_column.signed_block_header.message.body_root = Hash256::ZERO; + data_columns_list.push(Arc::new(data_column)); + } + } + + assert!(!data_columns_list.is_empty()); + + harness + .extend_chain( + (E::slots_per_epoch() * 4) as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + harness.advance_slot(); + + harness + .chain + .store + .try_prune_blobs(true, Epoch::new(2)) + .unwrap(); + + let block_root_iter = harness + .chain + .forwards_iter_block_roots_until(start_slot, end_slot) + .unwrap(); + + for block in block_root_iter { + let (block_root, _) = block.unwrap(); + let data_columns = harness.chain.store.get_data_columns(&block_root).unwrap(); + assert!(data_columns.is_none()) + } + + let error = harness + .chain + .import_historical_data_column_batch(Epoch::new(0), data_columns_list, cgc) + .unwrap_err(); + + assert!(matches!( + error, 
+ HistoricalDataColumnError::NoBlockFound { .. } + )); +} + /// Test that blocks and attestations that refer to states around an unaligned split state are /// processed correctly. #[tokio::test] @@ -4043,6 +4318,12 @@ async fn deneb_prune_blobs_no_finalization() { /// Check that blob pruning does not fail trying to prune across the fork boundary. #[tokio::test] async fn prune_blobs_across_fork_boundary() { + // This test covers earlier forks and only need to be executed once. + // Note: this test is quite expensive (building a chain to epoch 15) and we should revisit this + if fork_name_from_env() != Some(ForkName::latest_stable()) { + return; + } + let mut spec = ForkName::Capella.make_genesis_spec(E::default_spec()); let deneb_fork_epoch = Epoch::new(4); @@ -4059,6 +4340,7 @@ async fn prune_blobs_across_fork_boundary() { let store = get_store_generic(&db_path, StoreConfig::default(), spec); let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + harness.execution_block_generator().set_min_blob_count(1); let blocks_to_deneb_finalization = E::slots_per_epoch() * 7; let blocks_to_electra_finalization = E::slots_per_epoch() * 4; @@ -4214,7 +4496,7 @@ async fn prune_blobs_across_fork_boundary() { // Fulu fork epochs // Pruning should have been triggered assert!(store.get_blob_info().oldest_blob_slot <= Some(oldest_slot)); - // Oldest blost slot should never be greater than the first fulu slot + // Oldest blob slot should never be greater than the first fulu slot let fulu_first_slot = fulu_fork_epoch.start_slot(E::slots_per_epoch()); assert!(store.get_blob_info().oldest_blob_slot <= Some(fulu_first_slot)); // Blobs should not exist post-Fulu @@ -4699,7 +4981,7 @@ async fn fulu_prune_data_columns_margin_test(margin: u64) { check_data_column_existence(&harness, oldest_data_column_slot, harness.head_slot(), true); } -/// Check tat there are data column sidecars (or not) at every slot in the range. 
+/// Check that there are data column sidecars (or not) at every slot in the range. fn check_data_column_existence( harness: &TestHarness, start_slot: Slot, @@ -5402,7 +5684,6 @@ fn get_finalized_epoch_boundary_blocks( dump: &[BeaconSnapshot>], ) -> HashSet { dump.iter() - .cloned() .map(|checkpoint| checkpoint.beacon_state.finalized_checkpoint().root.into()) .collect() } @@ -5411,7 +5692,6 @@ fn get_blocks( dump: &[BeaconSnapshot>], ) -> HashSet { dump.iter() - .cloned() .map(|checkpoint| checkpoint.beacon_block_root.into()) .collect() } diff --git a/beacon_node/beacon_chain/tests/sync_committee_verification.rs b/beacon_node/beacon_chain/tests/sync_committee_verification.rs index 9dd12410fbb..d2124c66415 100644 --- a/beacon_node/beacon_chain/tests/sync_committee_verification.rs +++ b/beacon_node/beacon_chain/tests/sync_committee_verification.rs @@ -2,6 +2,8 @@ use beacon_chain::sync_committee_verification::{Error as SyncCommitteeError, SyncCommitteeData}; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee}; +use bls::{AggregateSignature, Keypair, SecretKey}; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes32; use safe_arith::SafeArith; use state_processing::{ @@ -11,10 +13,11 @@ use state_processing::{ use std::sync::LazyLock; use store::{SignedContributionAndProof, SyncCommitteeMessage}; use tree_hash::TreeHash; +use typenum::Unsigned; use types::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; use types::{ - AggregateSignature, Epoch, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, - SecretKey, Slot, SyncContributionData, SyncSelectionProof, SyncSubnetId, Unsigned, + Epoch, EthSpec, Hash256, MainnetEthSpec, Slot, SyncContributionData, SyncSelectionProof, + SyncSubnetId, }; pub type E = MainnetEthSpec; diff --git a/beacon_node/beacon_chain/tests/tests.rs b/beacon_node/beacon_chain/tests/tests.rs index ec0e607d00a..17d9c5f697f 100644 --- a/beacon_node/beacon_chain/tests/tests.rs +++ 
b/beacon_node/beacon_chain/tests/tests.rs @@ -8,13 +8,14 @@ use beacon_chain::{ OP_POOL_DB_KEY, }, }; +use bls::Keypair; use operation_pool::PersistedOperationPool; use state_processing::EpochProcessingError; use state_processing::{per_slot_processing, per_slot_processing::Error as SlotProcessingError}; use std::sync::LazyLock; use types::{ - BeaconState, BeaconStateError, BlockImportSource, Checkpoint, EthSpec, Hash256, Keypair, - MinimalEthSpec, RelativeEpoch, Slot, + BeaconState, BeaconStateError, BlockImportSource, Checkpoint, EthSpec, Hash256, MinimalEthSpec, + RelativeEpoch, Slot, }; type E = MinimalEthSpec; diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 95732abeb5d..521fc4ac975 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -2,8 +2,9 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, }; use beacon_chain::validator_monitor::{MISSED_BLOCK_LAG_SLOTS, ValidatorMonitorConfig}; +use bls::{Keypair, PublicKeyBytes}; use std::sync::LazyLock; -use types::{Epoch, EthSpec, Hash256, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Hash256, MainnetEthSpec, Slot}; // Should ideally be divisible by 3. 
pub const VALIDATOR_COUNT: usize = 48; diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 28ed0cca913..1cdf3693ff2 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -123,10 +123,10 @@ pub struct BeaconProcessorQueueLengths { gossip_data_column_queue: usize, delayed_block_queue: usize, status_queue: usize, - bbrange_queue: usize, - bbroots_queue: usize, - blbroots_queue: usize, - blbrange_queue: usize, + block_brange_queue: usize, + block_broots_queue: usize, + blob_broots_queue: usize, + blob_brange_queue: usize, dcbroots_queue: usize, dcbrange_queue: usize, gossip_bls_to_execution_change_queue: usize, @@ -189,10 +189,10 @@ impl BeaconProcessorQueueLengths { gossip_data_column_queue: 1024, delayed_block_queue: 1024, status_queue: 1024, - bbrange_queue: 1024, - bbroots_queue: 1024, - blbroots_queue: 1024, - blbrange_queue: 1024, + block_brange_queue: 1024, + block_broots_queue: 1024, + blob_broots_queue: 1024, + blob_brange_queue: 1024, dcbroots_queue: 1024, dcbrange_queue: 1024, gossip_bls_to_execution_change_queue: 16384, @@ -876,10 +876,10 @@ impl BeaconProcessor { let mut delayed_block_queue = FifoQueue::new(queue_lengths.delayed_block_queue); let mut status_queue = FifoQueue::new(queue_lengths.status_queue); - let mut bbrange_queue = FifoQueue::new(queue_lengths.bbrange_queue); - let mut bbroots_queue = FifoQueue::new(queue_lengths.bbroots_queue); - let mut blbroots_queue = FifoQueue::new(queue_lengths.blbroots_queue); - let mut blbrange_queue = FifoQueue::new(queue_lengths.blbrange_queue); + let mut block_brange_queue = FifoQueue::new(queue_lengths.block_brange_queue); + let mut block_broots_queue = FifoQueue::new(queue_lengths.block_broots_queue); + let mut blob_broots_queue = FifoQueue::new(queue_lengths.blob_broots_queue); + let mut blob_brange_queue = FifoQueue::new(queue_lengths.blob_brange_queue); let mut dcbroots_queue = 
FifoQueue::new(queue_lengths.dcbroots_queue); let mut dcbrange_queue = FifoQueue::new(queue_lengths.dcbrange_queue); @@ -1190,13 +1190,13 @@ impl BeaconProcessor { // and BlocksByRoot) } else if let Some(item) = status_queue.pop() { Some(item) - } else if let Some(item) = bbrange_queue.pop() { + } else if let Some(item) = block_brange_queue.pop() { Some(item) - } else if let Some(item) = bbroots_queue.pop() { + } else if let Some(item) = block_broots_queue.pop() { Some(item) - } else if let Some(item) = blbrange_queue.pop() { + } else if let Some(item) = blob_brange_queue.pop() { Some(item) - } else if let Some(item) = blbroots_queue.pop() { + } else if let Some(item) = blob_broots_queue.pop() { Some(item) } else if let Some(item) = dcbroots_queue.pop() { Some(item) @@ -1360,9 +1360,15 @@ impl BeaconProcessor { backfill_chain_segment.push(work, work_id) } Work::Status { .. } => status_queue.push(work, work_id), - Work::BlocksByRangeRequest { .. } => bbrange_queue.push(work, work_id), - Work::BlocksByRootsRequest { .. } => bbroots_queue.push(work, work_id), - Work::BlobsByRangeRequest { .. } => blbrange_queue.push(work, work_id), + Work::BlocksByRangeRequest { .. } => { + block_brange_queue.push(work, work_id) + } + Work::BlocksByRootsRequest { .. } => { + block_broots_queue.push(work, work_id) + } + Work::BlobsByRangeRequest { .. } => { + blob_brange_queue.push(work, work_id) + } Work::LightClientBootstrapRequest { .. } => { lc_bootstrap_queue.push(work, work_id) } @@ -1384,7 +1390,9 @@ impl BeaconProcessor { Work::GossipBlsToExecutionChange { .. } => { gossip_bls_to_execution_change_queue.push(work, work_id) } - Work::BlobsByRootsRequest { .. } => blbroots_queue.push(work, work_id), + Work::BlobsByRootsRequest { .. } => { + blob_broots_queue.push(work, work_id) + } Work::DataColumnsByRootsRequest { .. 
} => { dcbroots_queue.push(work, work_id) } @@ -1435,10 +1443,10 @@ impl BeaconProcessor { WorkType::ChainSegment => chain_segment_queue.len(), WorkType::ChainSegmentBackfill => backfill_chain_segment.len(), WorkType::Status => status_queue.len(), - WorkType::BlocksByRangeRequest => blbrange_queue.len(), - WorkType::BlocksByRootsRequest => blbroots_queue.len(), - WorkType::BlobsByRangeRequest => bbrange_queue.len(), - WorkType::BlobsByRootsRequest => bbroots_queue.len(), + WorkType::BlocksByRangeRequest => block_brange_queue.len(), + WorkType::BlocksByRootsRequest => block_broots_queue.len(), + WorkType::BlobsByRangeRequest => blob_brange_queue.len(), + WorkType::BlobsByRootsRequest => blob_broots_queue.len(), WorkType::DataColumnsByRootsRequest => dcbroots_queue.len(), WorkType::DataColumnsByRangeRequest => dcbrange_queue.len(), WorkType::GossipBlsToExecutionChange => { diff --git a/beacon_node/builder_client/Cargo.toml b/beacon_node/builder_client/Cargo.toml index 9b1f86360df..09bf3f48b4e 100644 --- a/beacon_node/builder_client/Cargo.toml +++ b/beacon_node/builder_client/Cargo.toml @@ -5,6 +5,8 @@ edition = { workspace = true } authors = ["Sean Anderson "] [dependencies] +bls = { workspace = true } +context_deserialize = { workspace = true } eth2 = { workspace = true } ethereum_ssz = { workspace = true } lighthouse_version = { workspace = true } diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 6b993542f35..4fc6b3a379b 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -1,9 +1,11 @@ +use bls::PublicKeyBytes; +use context_deserialize::ContextDeserialize; pub use eth2::Error; use eth2::types::beacon_response::EmptyMetadata; use eth2::types::builder_bid::SignedBuilderBid; use eth2::types::{ - ContentType, ContextDeserialize, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, - ForkVersionedResponse, PublicKeyBytes, SignedValidatorRegistrationData, Slot, + 
ContentType, EthSpec, ExecutionBlockHash, ForkName, ForkVersionDecode, ForkVersionedResponse, + SignedValidatorRegistrationData, Slot, }; use eth2::types::{FullPayloadContents, SignedBlindedBeaconBlock}; use eth2::{ @@ -270,7 +272,7 @@ impl BuilderHttpClient { &self, validator: &[SignedValidatorRegistrationData], ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -289,7 +291,7 @@ impl BuilderHttpClient { &self, blinded_block: &SignedBlindedBeaconBlock, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); let body = blinded_block.as_ssz_bytes(); @@ -337,7 +339,7 @@ impl BuilderHttpClient { &self, blinded_block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); let body = blinded_block.as_ssz_bytes(); @@ -387,7 +389,7 @@ impl BuilderHttpClient { &self, blinded_block: &SignedBlindedBeaconBlock, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -430,7 +432,7 @@ impl BuilderHttpClient { &self, blinded_block: &SignedBlindedBeaconBlock, ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -480,7 +482,7 @@ impl BuilderHttpClient { parent_hash: ExecutionBlockHash, pubkey: &PublicKeyBytes, ) -> Result>>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -521,7 +523,7 @@ impl BuilderHttpClient { /// `GET /eth/v1/builder/status` pub async fn get_builder_status(&self) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -538,9 +540,10 @@ impl BuilderHttpClient { #[cfg(test)] mod tests { use super::*; + use bls::Signature; + use eth2::types::MainnetEthSpec; use eth2::types::builder_bid::{BuilderBid, BuilderBidFulu}; use eth2::types::test_utils::{SeedableRng, TestRandom, XorShiftRng}; - use eth2::types::{MainnetEthSpec, Signature}; use mockito::{Matcher, Server, ServerGuard}; type E = MainnetEthSpec; diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 380e0c114a4..c48021e45d4 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -42,7 +42,7 @@ use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use store::database::interface::BeaconNodeBackend; use timer::spawn_timer; -use tracing::{debug, info, warn}; +use tracing::{debug, info, instrument, warn}; use types::data_column_custody_group::compute_ordered_custody_column_indices; use types::{ BeaconState, BlobSidecarList, ChainSpec, EthSpec, ExecutionBlockHash, Hash256, @@ -151,6 +151,7 @@ where /// Initializes the `BeaconChainBuilder`. The `build_beacon_chain` method will need to be /// called later in order to actually instantiate the `BeaconChain`. + #[instrument(skip_all)] pub async fn beacon_chain_builder( mut self, client_genesis: ClientGenesis, @@ -354,15 +355,10 @@ where let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; - // `BlobSidecar` is no longer used from Fulu onwards (superseded by `DataColumnSidecar`), - // which will be fetched via rpc instead (unimplemented). 
- let is_before_fulu = !spec - .fork_name_at_slot::(anchor_block.slot()) - .fulu_enabled(); - let anchor_blobs = if is_before_fulu && anchor_block.message().body().has_blobs() { + // Providing blobs is optional now and not providing them is recommended. + // Backfill can handle downloading the blobs or columns for the checkpoint block. + let anchor_blobs = if let Some(anchor_blobs_bytes) = anchor_blobs_bytes { let max_blobs_len = spec.max_blobs_per_block(anchor_block.epoch()) as usize; - let anchor_blobs_bytes = anchor_blobs_bytes - .ok_or("Blobs for checkpoint must be provided using --checkpoint-blobs")?; Some( BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes, max_blobs_len) .map_err(|e| format!("Unable to parse weak subj blobs SSZ: {e:?}"))?, @@ -618,6 +614,7 @@ where /// /// If type inference errors are being raised, see the comment on the definition of `Self`. #[allow(clippy::type_complexity)] + #[instrument(name = "build_client", skip_all)] pub fn build( mut self, ) -> Result>, String> { @@ -818,6 +815,7 @@ where TColdStore: ItemStore + 'static, { /// Consumes the internal `BeaconChainBuilder`, attaching the resulting `BeaconChain` to self. + #[instrument(skip_all)] pub fn build_beacon_chain(mut self) -> Result { let context = self .runtime_context diff --git a/beacon_node/client/src/metrics.rs b/beacon_node/client/src/metrics.rs index 605a7346886..6ff3eb6a70f 100644 --- a/beacon_node/client/src/metrics.rs +++ b/beacon_node/client/src/metrics.rs @@ -15,6 +15,13 @@ pub static IS_SYNCED: LazyLock> = LazyLock::new(|| { ) }); +pub static IS_OPTIMISTIC_SYNC: LazyLock> = LazyLock::new(|| { + try_create_int_gauge( + "optimistic_sync", + "Metric to check if the beacon chain is in optimistic sync mode. 
0 if synced and 1 if optimistic sync", + ) +}); + pub static NOTIFIER_HEAD_SLOT: LazyLock> = LazyLock::new(|| { try_create_int_gauge( "notifier_head_slot", diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index b1cf1bd7f55..52a3b92cb60 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -369,8 +369,12 @@ pub fn spawn_notifier( let block_hash = match beacon_chain.canonical_head.head_execution_status() { Ok(ExecutionStatus::Irrelevant(_)) => "n/a".to_string(), - Ok(ExecutionStatus::Valid(hash)) => format!("{} (verified)", hash), + Ok(ExecutionStatus::Valid(hash)) => { + metrics::set_gauge(&metrics::IS_OPTIMISTIC_SYNC, 0); + format!("{} (verified)", hash) + } Ok(ExecutionStatus::Optimistic(hash)) => { + metrics::set_gauge(&metrics::IS_OPTIMISTIC_SYNC, 1); warn!( info = "chain not fully verified, \ block and attestation production disabled until execution engine syncs", diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index f56159c7b55..c443e945743 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -8,13 +8,14 @@ edition = { workspace = true } alloy-consensus = { workspace = true } alloy-primitives = { workspace = true } alloy-rlp = { workspace = true } +alloy-rpc-types-eth = { workspace = true } arc-swap = "1.6.0" +bls = { workspace = true } builder_client = { path = "../builder_client" } bytes = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["events", "lighthouse"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } -ethers-core = { workspace = true } fixed_bytes = { workspace = true } fork_choice = { workspace = true } hash-db = "0.15.2" @@ -48,6 +49,7 @@ tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } triehash = "0.8.4" +typenum = { workspace = true } types = { workspace = 
true } warp = { workspace = true } zeroize = { workspace = true } diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 98da7dbf2c7..32090bccfc9 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -20,8 +20,8 @@ use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ Address, BeaconBlockRef, ConsolidationRequest, EthSpec, ExecutionBlockHash, ExecutionPayload, - ExecutionPayloadHeader, ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, - Uint256, VariableList, Withdrawal, Withdrawals, + ExecutionPayloadHeader, ExecutionPayloadRef, ForkName, Hash256, Transactions, Uint256, + Withdrawal, Withdrawals, }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, @@ -541,34 +541,6 @@ impl ExecutionPayloadBodyV1 { )) } } - ExecutionPayloadHeader::Gloas(header) => { - if let Some(withdrawals) = self.withdrawals { - Ok(ExecutionPayload::Gloas(ExecutionPayloadGloas { - parent_hash: header.parent_hash, - fee_recipient: header.fee_recipient, - state_root: header.state_root, - receipts_root: header.receipts_root, - logs_bloom: header.logs_bloom, - prev_randao: header.prev_randao, - block_number: header.block_number, - gas_limit: header.gas_limit, - gas_used: header.gas_used, - timestamp: header.timestamp, - extra_data: header.extra_data, - base_fee_per_gas: header.base_fee_per_gas, - block_hash: header.block_hash, - transactions: self.transactions, - withdrawals, - blob_gas_used: header.blob_gas_used, - excess_blob_gas: header.excess_blob_gas, - })) - } else { - Err(format!( - "block {} is post capella but payload body doesn't have withdrawals", - header.block_hash - )) - } - } } } } @@ -763,14 +735,18 @@ pub struct ClientVersionV1 { } impl ClientVersionV1 { - pub fn calculate_graffiti(&self, lighthouse_commit_prefix: CommitPrefix) -> Graffiti { - let graffiti_string = format!( + pub fn 
calculate_graffiti( + &self, + lighthouse_commit_prefix: CommitPrefix, + validator_graffiti: Option, + ) -> Graffiti { + let append_graffiti_full = format!( "{}{}LH{}", self.code, self.commit .0 .get(..4) - .map_or_else(|| self.commit.0.as_str(), |s| s) + .unwrap_or(self.commit.0.as_str()) .to_lowercase(), lighthouse_commit_prefix .0 @@ -778,6 +754,53 @@ impl ClientVersionV1 { .unwrap_or("0000") .to_lowercase(), ); + + // Implement the special case here: + // https://hackmd.io/@wmoBhF17RAOH2NZ5bNXJVg/BJX2c9gja#SPECIAL-CASE-the-flexible-standard + let append_graffiti_one_byte = format!( + "{}{}LH{}", + self.code, + self.commit + .0 + .get(..2) + .unwrap_or(self.commit.0.as_str()) + .to_lowercase(), + lighthouse_commit_prefix + .0 + .get(..2) + .unwrap_or("00") + .to_lowercase(), + ); + + let append_graffiti_no_commit = format!("{}LH", self.code); + let append_graffiti_only_el = format!("{}", self.code); + + let graffiti_string = if let Some(graffiti) = validator_graffiti { + let graffiti_length = graffiti.as_utf8_lossy().len(); + let graffiti_str = graffiti.as_utf8_lossy(); + + // 12 characters for append_graffiti_full, plus one character for spacing + // that leaves user specified graffiti to be 32-12-1 = 19 characters max, i.e., <20 + if graffiti_length < 20 { + format!("{} {}", append_graffiti_full, graffiti_str) + // user-specified graffiti is between 20-23 characters + } else if (20..24).contains(&graffiti_length) { + format!("{} {}", append_graffiti_one_byte, graffiti_str) + // user-specified graffiti is between 24-27 characters + } else if (24..28).contains(&graffiti_length) { + format!("{} {}", append_graffiti_no_commit, graffiti_str) + // user-specified graffiti is between 28-29 characters + } else if (28..30).contains(&graffiti_length) { + format!("{} {}", append_graffiti_only_el, graffiti_str) + // if user-specified graffiti is between 30-32 characters, append nothing + } else { + return graffiti; + } + } else { + // if no validator_graffiti (user doesn't 
specify), use the full client version info graffiti + append_graffiti_full + }; + let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; let bytes_to_copy = std::cmp::min(graffiti_string.len(), GRAFFITI_BYTES_LEN); graffiti_bytes[..bytes_to_copy] diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index bc927e19b41..c421491f808 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -103,9 +103,10 @@ pub static LIGHTHOUSE_JSON_CLIENT_VERSION: LazyLock = /// Contains methods to convert arbitrary bytes to an ETH2 deposit contract object. pub mod deposit_log { + use bls::{PublicKeyBytes, SignatureBytes}; use ssz::Decode; use state_processing::per_block_processing::signature_sets::deposit_pubkey_signature_message; - use types::{ChainSpec, DepositData, Hash256, PublicKeyBytes, SignatureBytes}; + use types::{ChainSpec, DepositData, Hash256}; pub use eth2::lighthouse::DepositLog; @@ -652,7 +653,7 @@ impl HttpJsonRpc { let mut request = self .client - .post(self.url.full.clone()) + .post(self.url.expose_full().clone()) .timeout(timeout) .header(CONTENT_TYPE, "application/json") .json(&body); @@ -768,7 +769,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayload::from(execution_payload)]); + let params = json!([JsonExecutionPayload::try_from(execution_payload)?]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -785,7 +786,7 @@ impl HttpJsonRpc { &self, execution_payload: ExecutionPayload, ) -> Result { - let params = json!([JsonExecutionPayload::from(execution_payload)]); + let params = json!([JsonExecutionPayload::try_from(execution_payload)?]); let response: JsonPayloadStatusV1 = self .rpc_request( @@ -803,7 +804,12 @@ impl HttpJsonRpc { new_payload_request_deneb: NewPayloadRequestDeneb<'_, E>, ) -> Result { let params = json!([ - 
JsonExecutionPayload::Deneb(new_payload_request_deneb.execution_payload.clone().into()), + JsonExecutionPayload::Deneb( + new_payload_request_deneb + .execution_payload + .clone() + .try_into()? + ), new_payload_request_deneb.versioned_hashes, new_payload_request_deneb.parent_beacon_block_root, ]); @@ -825,7 +831,10 @@ impl HttpJsonRpc { ) -> Result { let params = json!([ JsonExecutionPayload::Electra( - new_payload_request_electra.execution_payload.clone().into() + new_payload_request_electra + .execution_payload + .clone() + .try_into()? ), new_payload_request_electra.versioned_hashes, new_payload_request_electra.parent_beacon_block_root, @@ -850,7 +859,12 @@ impl HttpJsonRpc { new_payload_request_fulu: NewPayloadRequestFulu<'_, E>, ) -> Result { let params = json!([ - JsonExecutionPayload::Fulu(new_payload_request_fulu.execution_payload.clone().into()), + JsonExecutionPayload::Fulu( + new_payload_request_fulu + .execution_payload + .clone() + .try_into()? + ), new_payload_request_fulu.versioned_hashes, new_payload_request_fulu.parent_beacon_block_root, new_payload_request_fulu @@ -874,7 +888,12 @@ impl HttpJsonRpc { new_payload_request_gloas: NewPayloadRequestGloas<'_, E>, ) -> Result { let params = json!([ - JsonExecutionPayload::Gloas(new_payload_request_gloas.execution_payload.clone().into()), + JsonExecutionPayload::Gloas( + new_payload_request_gloas + .execution_payload + .clone() + .try_into()? 
+ ), new_payload_request_gloas.versioned_hashes, new_payload_request_gloas.parent_beacon_block_root, new_payload_request_gloas @@ -1125,10 +1144,14 @@ impl HttpJsonRpc { ) .await?; - Ok(response + response .into_iter() - .map(|opt_json| opt_json.map(From::from)) - .collect()) + .map(|opt_json| { + opt_json + .map(|json| json.try_into().map_err(Error::from)) + .transpose() + }) + .collect::, _>>() } pub async fn get_payload_bodies_by_range_v1( @@ -1149,10 +1172,14 @@ impl HttpJsonRpc { ) .await?; - Ok(response + response .into_iter() - .map(|opt_json| opt_json.map(From::from)) - .collect()) + .map(|opt_json| { + opt_json + .map(|json| json.try_into().map_err(Error::from)) + .transpose() + }) + .collect::, _>>() } pub async fn exchange_capabilities(&self) -> Result { @@ -1440,10 +1467,13 @@ mod test { use super::auth::JwtKey; use super::*; use crate::test_utils::{DEFAULT_JWT_SECRET, MockServer}; + use fixed_bytes::FixedBytesExtended; + use ssz_types::VariableList; use std::future::Future; use std::str::FromStr; use std::sync::Arc; - use types::{FixedBytesExtended, MainnetEthSpec, Unsigned}; + use typenum::Unsigned; + use types::MainnetEthSpec; struct Tester { server: MockServer, @@ -1453,8 +1483,7 @@ mod test { impl Tester { pub fn new(with_auth: bool) -> Self { - let spec = Arc::new(MainnetEthSpec::default_spec()); - let server = MockServer::unit_testing(spec); + let server = MockServer::unit_testing(); let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); @@ -1814,16 +1843,16 @@ mod test { fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), + logs_bloom: vec![1; 256].try_into().unwrap(), prev_randao: Hash256::repeat_byte(1), block_number: 0, gas_limit: 1, gas_used: 2, timestamp: 42, - extra_data: vec![].into(), + extra_data: vec![].try_into().unwrap(), base_fee_per_gas: 
Uint256::from(1), block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }, )) .await; @@ -1861,16 +1890,16 @@ mod test { fee_recipient: Address::repeat_byte(1), state_root: Hash256::repeat_byte(1), receipts_root: Hash256::repeat_byte(0), - logs_bloom: vec![1; 256].into(), + logs_bloom: vec![1; 256].try_into().unwrap(), prev_randao: Hash256::repeat_byte(1), block_number: 0, gas_limit: 1, gas_used: 2, timestamp: 42, - extra_data: vec![].into(), + extra_data: vec![].try_into().unwrap(), base_fee_per_gas: Uint256::from(1), block_hash: ExecutionBlockHash::repeat_byte(1), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }, )) .await @@ -2071,16 +2100,16 @@ mod test { fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: Hash256::zero(), block_number: 1, gas_limit: u64::from_str_radix("1c95111",16).unwrap(), gas_used: 0, timestamp: 5, - extra_data: vec![].into(), + extra_data: vec![].try_into().unwrap(), base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x6359b8381a370e2f54072a5784ddd78b6ed024991558c511d4452eb4f6ac898c").unwrap(), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }); assert_eq!(payload, expected); @@ -2096,16 +2125,16 @@ mod test { fee_recipient: Address::from_str("0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(), state_root: Hash256::from_str("0xca3149fa9e37db08d1cd49c9061db1002ef1cd58db2210f2115c8c989b2bdf45").unwrap(), receipts_root: Hash256::from_str("0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421").unwrap(), - logs_bloom: vec![0; 
256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: Hash256::zero(), block_number: 1, gas_limit: u64::from_str_radix("1c9c380",16).unwrap(), gas_used: 0, timestamp: 5, - extra_data: vec![].into(), + extra_data: vec![].try_into().unwrap(), base_fee_per_gas: Uint256::from(7), block_hash: ExecutionBlockHash::from_str("0x3559e851470f6e7bbed1db474980683e8c315bfce99b2a6ef47c057c04de7858").unwrap(), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), })) .await; }, diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 33decd4ec86..fc8eae015b9 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,7 +1,8 @@ use super::*; use alloy_rlp::RlpEncodable; use serde::{Deserialize, Serialize}; -use ssz::Decode; +use ssz::{Decode, TryFromIter}; +use ssz_types::{FixedVector, VariableList, typenum::Unsigned}; use strum::EnumString; use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; @@ -9,7 +10,7 @@ use types::blob_sidecar::BlobsList; use types::execution_requests::{ ConsolidationRequests, DepositRequests, RequestType, WithdrawalRequests, }; -use types::{Blob, FixedVector, KzgProof, Unsigned}; +use types::{Blob, KzgProof}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -130,9 +131,11 @@ impl From> for JsonExecutionPayloadBell } } } -impl From> for JsonExecutionPayloadCapella { - fn from(payload: ExecutionPayloadCapella) -> Self { - JsonExecutionPayloadCapella { +impl TryFrom> for JsonExecutionPayloadCapella { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadCapella) -> Result { + Ok(JsonExecutionPayloadCapella { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -147,18 +150,15 @@ impl From> for 
JsonExecutionPayloadCapell base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), - } + withdrawals: withdrawals_to_json(payload.withdrawals)?, + }) } } -impl From> for JsonExecutionPayloadDeneb { - fn from(payload: ExecutionPayloadDeneb) -> Self { - JsonExecutionPayloadDeneb { +impl TryFrom> for JsonExecutionPayloadDeneb { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadDeneb) -> Result { + Ok(JsonExecutionPayloadDeneb { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -173,21 +173,18 @@ impl From> for JsonExecutionPayloadDeneb base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for JsonExecutionPayloadElectra { - fn from(payload: ExecutionPayloadElectra) -> Self { - JsonExecutionPayloadElectra { +impl TryFrom> for JsonExecutionPayloadElectra { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadElectra) -> Result { + Ok(JsonExecutionPayloadElectra { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -202,21 +199,18 @@ impl From> for JsonExecutionPayloadElectr base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for 
JsonExecutionPayloadFulu { - fn from(payload: ExecutionPayloadFulu) -> Self { - JsonExecutionPayloadFulu { +impl TryFrom> for JsonExecutionPayloadFulu { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadFulu) -> Result { + Ok(JsonExecutionPayloadFulu { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -231,21 +225,18 @@ impl From> for JsonExecutionPayloadFulu { base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for JsonExecutionPayloadGloas { - fn from(payload: ExecutionPayloadGloas) -> Self { - JsonExecutionPayloadGloas { +impl TryFrom> for JsonExecutionPayloadGloas { + type Error = ssz_types::Error; + + fn try_from(payload: ExecutionPayloadGloas) -> Result { + Ok(JsonExecutionPayloadGloas { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -260,27 +251,34 @@ impl From> for JsonExecutionPayloadGloas base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_to_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for JsonExecutionPayload { - fn from(execution_payload: ExecutionPayload) -> Self { +impl TryFrom> for JsonExecutionPayload { + type Error = ssz_types::Error; + + fn try_from(execution_payload: ExecutionPayload) -> Result { match execution_payload { - ExecutionPayload::Bellatrix(payload) => JsonExecutionPayload::Bellatrix(payload.into()), - 
ExecutionPayload::Capella(payload) => JsonExecutionPayload::Capella(payload.into()), - ExecutionPayload::Deneb(payload) => JsonExecutionPayload::Deneb(payload.into()), - ExecutionPayload::Electra(payload) => JsonExecutionPayload::Electra(payload.into()), - ExecutionPayload::Fulu(payload) => JsonExecutionPayload::Fulu(payload.into()), - ExecutionPayload::Gloas(payload) => JsonExecutionPayload::Gloas(payload.into()), + ExecutionPayload::Bellatrix(payload) => { + Ok(JsonExecutionPayload::Bellatrix(payload.into())) + } + ExecutionPayload::Capella(payload) => { + Ok(JsonExecutionPayload::Capella(payload.try_into()?)) + } + ExecutionPayload::Deneb(payload) => { + Ok(JsonExecutionPayload::Deneb(payload.try_into()?)) + } + ExecutionPayload::Electra(payload) => { + Ok(JsonExecutionPayload::Electra(payload.try_into()?)) + } + ExecutionPayload::Fulu(payload) => Ok(JsonExecutionPayload::Fulu(payload.try_into()?)), + ExecutionPayload::Gloas(payload) => { + Ok(JsonExecutionPayload::Gloas(payload.try_into()?)) + } } } } @@ -305,9 +303,11 @@ impl From> for ExecutionPayloadBell } } } -impl From> for ExecutionPayloadCapella { - fn from(payload: JsonExecutionPayloadCapella) -> Self { - ExecutionPayloadCapella { +impl TryFrom> for ExecutionPayloadCapella { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadCapella) -> Result { + Ok(ExecutionPayloadCapella { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -322,19 +322,16 @@ impl From> for ExecutionPayloadCapell base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), - } + withdrawals: withdrawals_from_json(payload.withdrawals)?, + }) } } -impl From> for ExecutionPayloadDeneb { - fn from(payload: JsonExecutionPayloadDeneb) -> Self { - ExecutionPayloadDeneb { +impl TryFrom> for ExecutionPayloadDeneb { 
+ type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadDeneb) -> Result { + Ok(ExecutionPayloadDeneb { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -349,21 +346,18 @@ impl From> for ExecutionPayloadDeneb base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for ExecutionPayloadElectra { - fn from(payload: JsonExecutionPayloadElectra) -> Self { - ExecutionPayloadElectra { +impl TryFrom> for ExecutionPayloadElectra { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadElectra) -> Result { + Ok(ExecutionPayloadElectra { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -378,21 +372,18 @@ impl From> for ExecutionPayloadElectr base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for ExecutionPayloadFulu { - fn from(payload: JsonExecutionPayloadFulu) -> Self { - ExecutionPayloadFulu { +impl TryFrom> for ExecutionPayloadFulu { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadFulu) -> Result { + Ok(ExecutionPayloadFulu { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -407,21 +398,18 @@ impl From> for ExecutionPayloadFulu { base_fee_per_gas: payload.base_fee_per_gas, block_hash: 
payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for ExecutionPayloadGloas { - fn from(payload: JsonExecutionPayloadGloas) -> Self { - ExecutionPayloadGloas { +impl TryFrom> for ExecutionPayloadGloas { + type Error = ssz_types::Error; + + fn try_from(payload: JsonExecutionPayloadGloas) -> Result { + Ok(ExecutionPayloadGloas { parent_hash: payload.parent_hash, fee_recipient: payload.fee_recipient, state_root: payload.state_root, @@ -436,27 +424,34 @@ impl From> for ExecutionPayloadGloas base_fee_per_gas: payload.base_fee_per_gas, block_hash: payload.block_hash, transactions: payload.transactions, - withdrawals: payload - .withdrawals - .into_iter() - .map(Into::into) - .collect::>() - .into(), + withdrawals: withdrawals_from_json(payload.withdrawals)?, blob_gas_used: payload.blob_gas_used, excess_blob_gas: payload.excess_blob_gas, - } + }) } } -impl From> for ExecutionPayload { - fn from(json_execution_payload: JsonExecutionPayload) -> Self { +impl TryFrom> for ExecutionPayload { + type Error = ssz_types::Error; + + fn try_from(json_execution_payload: JsonExecutionPayload) -> Result { match json_execution_payload { - JsonExecutionPayload::Bellatrix(payload) => ExecutionPayload::Bellatrix(payload.into()), - JsonExecutionPayload::Capella(payload) => ExecutionPayload::Capella(payload.into()), - JsonExecutionPayload::Deneb(payload) => ExecutionPayload::Deneb(payload.into()), - JsonExecutionPayload::Electra(payload) => ExecutionPayload::Electra(payload.into()), - JsonExecutionPayload::Fulu(payload) => ExecutionPayload::Fulu(payload.into()), - JsonExecutionPayload::Gloas(payload) => ExecutionPayload::Gloas(payload.into()), + JsonExecutionPayload::Bellatrix(payload) => { + 
Ok(ExecutionPayload::Bellatrix(payload.into())) + } + JsonExecutionPayload::Capella(payload) => { + Ok(ExecutionPayload::Capella(payload.try_into()?)) + } + JsonExecutionPayload::Deneb(payload) => { + Ok(ExecutionPayload::Deneb(payload.try_into()?)) + } + JsonExecutionPayload::Electra(payload) => { + Ok(ExecutionPayload::Electra(payload.try_into()?)) + } + JsonExecutionPayload::Fulu(payload) => Ok(ExecutionPayload::Fulu(payload.try_into()?)), + JsonExecutionPayload::Gloas(payload) => { + Ok(ExecutionPayload::Gloas(payload.try_into()?)) + } } } } @@ -590,13 +585,17 @@ impl TryFrom> for GetPayloadResponse { } JsonGetPayloadResponse::Capella(response) => { Ok(GetPayloadResponse::Capella(GetPayloadResponseCapella { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, })) } JsonGetPayloadResponse::Deneb(response) => { Ok(GetPayloadResponse::Deneb(GetPayloadResponseDeneb { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, @@ -604,34 +603,40 @@ impl TryFrom> for GetPayloadResponse { } JsonGetPayloadResponse::Electra(response) => { Ok(GetPayloadResponse::Electra(GetPayloadResponseElectra { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, requests: response.execution_requests.try_into().map_err(|e| { - format!("Failed to 
convert json to execution requests : {:?}", e) + format!("Failed to convert json to execution requests: {:?}", e) })?, })) } JsonGetPayloadResponse::Fulu(response) => { Ok(GetPayloadResponse::Fulu(GetPayloadResponseFulu { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, requests: response.execution_requests.try_into().map_err(|e| { - format!("Failed to convert json to execution requests {:?}", e) + format!("Failed to convert json to execution requests: {:?}", e) })?, })) } JsonGetPayloadResponse::Gloas(response) => { Ok(GetPayloadResponse::Gloas(GetPayloadResponseGloas { - execution_payload: response.execution_payload.into(), + execution_payload: response.execution_payload.try_into().map_err(|e| { + format!("Failed to convert json to execution payload: {:?}", e) + })?, block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, requests: response.execution_requests.try_into().map_err(|e| { - format!("Failed to convert json to execution requests {:?}", e) + format!("Failed to convert json to execution requests: {:?}", e) })?, })) } @@ -673,6 +678,26 @@ impl From for Withdrawal { } } } + +// Helper functions to convert between `VariableList` and `VariableList`. 
+fn withdrawals_to_json( + list: VariableList, +) -> Result, ssz_types::Error> +where + N: Unsigned, +{ + VariableList::try_from_iter(list.into_iter().map(Into::into)) +} + +fn withdrawals_from_json( + list: VariableList, +) -> Result, ssz_types::Error> +where + N: Unsigned, +{ + VariableList::try_from_iter(list.into_iter().map(Into::into)) +} + #[derive(Debug, PartialEq, Clone, RlpEncodable)] pub struct EncodableJsonWithdrawal<'a> { pub index: u64, @@ -976,30 +1001,25 @@ pub struct JsonExecutionPayloadBodyV1 { pub withdrawals: Option>, } -impl From> for ExecutionPayloadBodyV1 { - fn from(value: JsonExecutionPayloadBodyV1) -> Self { - Self { +impl TryFrom> for ExecutionPayloadBodyV1 { + type Error = ssz_types::Error; + + fn try_from(value: JsonExecutionPayloadBodyV1) -> Result { + Ok(Self { transactions: value.transactions, - withdrawals: value.withdrawals.map(|json_withdrawals| { - Withdrawals::::from( - json_withdrawals - .into_iter() - .map(Into::into) - .collect::>(), - ) - }), - } + withdrawals: value.withdrawals.map(withdrawals_from_json).transpose()?, + }) } } -impl From> for JsonExecutionPayloadBodyV1 { - fn from(value: ExecutionPayloadBodyV1) -> Self { - Self { +impl TryFrom> for JsonExecutionPayloadBodyV1 { + type Error = ssz_types::Error; + + fn try_from(value: ExecutionPayloadBodyV1) -> Result { + Ok(Self { transactions: value.transactions, - withdrawals: value.withdrawals.map(|withdrawals| { - VariableList::from(withdrawals.into_iter().map(Into::into).collect::>()) - }), - } + withdrawals: value.withdrawals.map(withdrawals_to_json).transpose()?, + }) } } @@ -1081,10 +1101,10 @@ impl TryFrom for ClientVersionV1 { #[cfg(test)] mod tests { + use bls::{PublicKeyBytes, SignatureBytes}; use ssz::Encode; use types::{ - ConsolidationRequest, DepositRequest, MainnetEthSpec, PublicKeyBytes, RequestType, - SignatureBytes, WithdrawalRequest, + ConsolidationRequest, DepositRequest, MainnetEthSpec, RequestType, WithdrawalRequest, }; use super::*; diff --git 
a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index aa5261c80b0..ba94296b859 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -172,6 +172,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { } } +//TODO(EIP7732): Consider implementing these as methods on the NewPayloadRequest struct impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> { type Error = BeaconStateError; @@ -220,17 +221,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> parent_beacon_block_root: block_ref.parent_root, execution_requests: &block_ref.body.execution_requests, })), - BeaconBlockRef::Gloas(block_ref) => Ok(Self::Gloas(NewPayloadRequestGloas { - execution_payload: &block_ref.body.execution_payload.execution_payload, - versioned_hashes: block_ref - .body - .blob_kzg_commitments - .iter() - .map(kzg_commitment_to_versioned_hash) - .collect(), - parent_beacon_block_root: block_ref.parent_root, - execution_requests: &block_ref.body.execution_requests, - })), + BeaconBlockRef::Gloas(_) => Err(Self::Error::IncorrectStateVariant), } } } @@ -251,11 +242,15 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<' ExecutionPayloadRef::Deneb(_) => Err(Self::Error::IncorrectStateVariant), ExecutionPayloadRef::Electra(_) => Err(Self::Error::IncorrectStateVariant), ExecutionPayloadRef::Fulu(_) => Err(Self::Error::IncorrectStateVariant), + //TODO(EIP7732): Probably time to just get rid of this ExecutionPayloadRef::Gloas(_) => Err(Self::Error::IncorrectStateVariant), } } } +// TODO(EIP-7732) build out the following when it's needed like in Mark's branch +// impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest { + #[cfg(test)] mod test { use crate::versioned_hashes::Error as VersionedHashError; @@ -337,7 +332,7 @@ mod test { *beacon_block .body_mut() .blob_kzg_commitments_mut() - 
.expect("should get commitments") = commitments.into(); + .expect("should get commitments") = commitments.try_into().unwrap(); let new_payload_request = NewPayloadRequest::try_from(beacon_block.to_ref()) .expect("should create new payload request"); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index a5fa0f34158..554668dd8a7 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -9,6 +9,7 @@ use crate::payload_cache::PayloadCache; use arc_swap::ArcSwapOption; use auth::{Auth, JwtKey, strip_prefix}; pub use block_hash::calculate_execution_block_hash; +use bls::{PublicKeyBytes, Signature}; use builder_client::BuilderHttpClient; pub use engine_api::EngineCapabilities; use engine_api::Error as ApiError; @@ -18,7 +19,6 @@ use engines::{Engine, EngineError}; pub use engines::{EngineState, ForkchoiceState}; use eth2::types::{BlobsBundle, FullPayloadContents}; use eth2::types::{ForkVersionedResponse, builder_bid::SignedBuilderBid}; -use ethers_core::types::Transaction as EthersTransaction; use fixed_bytes::UintExtended; use fork_choice::ForkchoiceUpdateParameters; use logging::crit; @@ -55,8 +55,8 @@ use types::{ }; use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, - ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, - FullPayload, ProposerPreparationData, PublicKeyBytes, Signature, Slot, + ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadFulu, FullPayload, + ProposerPreparationData, Slot, }; mod block_hash; @@ -131,13 +131,6 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { - payload: ExecutionPayloadHeader::Gloas(builder_bid.header).into(), - block_value: builder_bid.value, - kzg_commitments: builder_bid.blob_kzg_commitments, - blobs_and_proofs: None, - requests: Some(builder_bid.execution_requests), - }, }; 
Ok(ProvenancedPayload::Builder( BlockProposalContentsType::Blinded(block_proposal_contents), @@ -171,11 +164,18 @@ pub enum Error { InvalidPayloadBody(String), InvalidPayloadConversion, InvalidBlobConversion(String), + SszTypesError(ssz_types::Error), BeaconStateError(BeaconStateError), PayloadTypeMismatch, VerifyingVersionedHashes(versioned_hashes::Error), } +impl From for Error { + fn from(e: ssz_types::Error) -> Self { + Error::SszTypesError(e) + } +} + impl From for Error { fn from(e: BeaconStateError) -> Self { Error::BeaconStateError(e) @@ -1361,6 +1361,7 @@ impl ExecutionLayer { } /// Maps to the `engine_newPayload` JSON-RPC call. + /// TODO(EIP-7732) figure out how and why Mark relaxed new_payload_request param's typ to NewPayloadRequest pub async fn notify_new_payload( &self, new_payload_request: NewPayloadRequest<'_, E>, @@ -1832,10 +1833,12 @@ impl ExecutionLayer { ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Electra => ExecutionPayloadElectra::default().into(), ForkName::Fulu => ExecutionPayloadFulu::default().into(), - ForkName::Gloas => ExecutionPayloadGloas::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::InvalidForkForPayload); } + ForkName::Gloas => { + return Err(Error::InvalidForkForPayload); + } }; return Ok(Some(payload)); } @@ -2102,6 +2105,7 @@ enum InvalidBuilderPayload { payload: u64, expected: u64, }, + SszTypesError(ssz_types::Error), } impl fmt::Display for InvalidBuilderPayload { @@ -2143,6 +2147,7 @@ impl fmt::Display for InvalidBuilderPayload { InvalidBuilderPayload::GasLimitMismatch { payload, expected } => { write!(f, "payload gas limit was {} not {}", payload, expected) } + Self::SszTypesError(e) => write!(f, "{:?}", e), } } } @@ -2198,7 +2203,13 @@ fn verify_builder_bid( .withdrawals() .ok() .cloned() - .map(|withdrawals| Withdrawals::::from(withdrawals).tree_hash_root()); + .map(|withdrawals| { + Withdrawals::::try_from(withdrawals) + 
.map_err(InvalidBuilderPayload::SszTypesError) + .map(|w| w.tree_hash_root()) + }) + .transpose()?; + let payload_withdrawals_root = header.withdrawals_root().ok(); let expected_gas_limit = proposer_gas_limit .and_then(|target_gas_limit| expected_gas_limit(parent_gas_limit, target_gas_limit, spec)); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 44e72cba6be..6b247a4cd49 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -1,4 +1,3 @@ -use crate::EthersTransaction; use crate::engine_api::{ ExecutionBlock, PayloadAttributes, PayloadId, PayloadStatusV1, PayloadStatusV1Status, json_structures::{ @@ -6,13 +5,17 @@ use crate::engine_api::{ }, }; use crate::engines::ForkchoiceState; +use alloy_consensus::TxEnvelope; +use alloy_rpc_types_eth::Transaction as AlloyTransaction; use eth2::types::BlobsBundle; +use fixed_bytes::FixedBytesExtended; use kzg::{Kzg, KzgCommitment, KzgProof}; use parking_lot::Mutex; use rand::{Rng, SeedableRng, rngs::StdRng}; use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_types::VariableList; +use std::cmp::max; use std::collections::HashMap; use std::sync::Arc; use tree_hash::TreeHash; @@ -20,8 +23,8 @@ use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, - ExecutionPayloadGloas, ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, - KzgProofs, Slot, Transaction, Transactions, Uint256, + ExecutionPayloadGloas, ExecutionPayloadHeader, ForkName, Hash256, KzgProofs, Transaction, + Transactions, Uint256, }; use super::DEFAULT_TERMINAL_BLOCK; @@ -39,8 +42,8 @@ pub enum Block { PoS(ExecutionPayload), } -pub fn 
mock_el_extra_data() -> types::VariableList { - "block gen was here".as_bytes().to_vec().into() +pub fn mock_el_extra_data() -> VariableList { + "block gen was here".as_bytes().to_vec().try_into().unwrap() } impl Block { @@ -157,7 +160,6 @@ pub struct ExecutionBlockGenerator { pub blobs_bundles: HashMap>, pub kzg: Option>, rng: Arc>, - spec: Arc, } fn make_rng() -> Arc> { @@ -177,7 +179,6 @@ impl ExecutionBlockGenerator { prague_time: Option, osaka_time: Option, amsterdam_time: Option, - spec: Arc, kzg: Option>, ) -> Self { let mut generator = Self { @@ -200,7 +201,6 @@ impl ExecutionBlockGenerator { blobs_bundles: <_>::default(), kzg, rng: make_rng(), - spec, }; generator.insert_pow_block(0).unwrap(); @@ -265,37 +265,6 @@ impl ExecutionBlockGenerator { ForkName::Bellatrix } - /// Get the timestamp at which `fork` activates. - /// - /// This function will panic if the `fork` is not enabled or is `<= ForkName::Bellatrix`. - pub fn get_fork_timestamp_post_capella(&self, fork: ForkName) -> u64 { - match fork { - ForkName::Gloas => self.amsterdam_time, - ForkName::Fulu => self.osaka_time, - ForkName::Electra => self.prague_time, - ForkName::Deneb => self.cancun_time, - ForkName::Capella => self.shanghai_time, - _ => panic!("only the Capella fork or later is supported"), - } - .unwrap_or_else(|| panic!("fork is {fork} but no corresponding timestamp is set")) - } - - /// This is a slightly nasty method for converting timestamps to slots, but it will suffice - /// until we can plumb through a slot clock. - pub fn timestamp_to_slot_post_capella(&self, timestamp: u64) -> Slot { - let fork = self.get_fork_at_timestamp(timestamp); - let fork_epoch = self.spec.fork_epoch(fork).unwrap(); - let fork_timestamp = self.get_fork_timestamp_post_capella(fork); - - // Number of slots since fork. 
- let slot_offset = timestamp - .checked_sub(fork_timestamp) - .expect("timestamp should be >= fork timestamp") - / self.spec.seconds_per_slot; - - fork_epoch.start_slot(E::slots_per_epoch()) + Slot::new(slot_offset) - } - pub fn execution_block_by_number(&self, number: u64) -> Option { self.block_by_number(number) .map(|block| block.as_execution_block(self.terminal_total_difficulty)) @@ -633,7 +602,7 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -642,7 +611,7 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }), PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { ForkName::Bellatrix => ExecutionPayload::Bellatrix(ExecutionPayloadBellatrix { @@ -650,7 +619,7 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -659,14 +628,14 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), + transactions: vec![].try_into().unwrap(), }), ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella { parent_hash: head_block_hash, fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 
256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -675,8 +644,8 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), }), _ => unreachable!(), }, @@ -686,7 +655,7 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -695,8 +664,8 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, }), @@ -705,7 +674,7 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, @@ -714,8 +683,8 @@ impl ExecutionBlockGenerator { extra_data: mock_el_extra_data::(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, 
excess_blob_gas: 0, }), @@ -724,17 +693,17 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: "block gen was here".as_bytes().to_vec().try_into().unwrap(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, }), @@ -743,17 +712,17 @@ impl ExecutionBlockGenerator { fee_recipient: pa.suggested_fee_recipient, receipts_root: Hash256::repeat_byte(42), state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), + logs_bloom: vec![0; 256].try_into().unwrap(), prev_randao: pa.prev_randao, block_number: parent.block_number() + 1, gas_limit: DEFAULT_GAS_LIMIT, gas_used: GAS_USED, timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), + extra_data: "block gen was here".as_bytes().to_vec().try_into().unwrap(), base_fee_per_gas: Uint256::from(1u64), block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), + transactions: vec![].try_into().unwrap(), + withdrawals: pa.withdrawals.clone().try_into().unwrap(), blob_gas_used: 0, excess_blob_gas: 0, }), @@ -763,12 +732,10 @@ impl ExecutionBlockGenerator { let fork_name = execution_payload.fork_name(); if fork_name.deneb_enabled() { - // get random number between 0 and Max Blobs + // get random number between 0 and 1 blobs by default + // For tests that need higher blob count, consider 
adding a `set_max_blob_count` method let mut rng = self.rng.lock(); - let epoch = self - .timestamp_to_slot_post_capella(execution_payload.timestamp()) - .epoch(E::slots_per_epoch()); - let max_blobs = self.spec.max_blobs_per_block(epoch) as usize; + let max_blobs = max(1, self.min_blobs_count); let num_blobs = rng.random_range(self.min_blobs_count..=max_blobs); let (bundle, transactions) = generate_blobs(num_blobs, fork_name)?; for tx in Vec::from(transactions) { @@ -845,29 +812,30 @@ pub fn generate_blobs( let bundle = if fork_name.fulu_enabled() { let (kzg_commitment, kzg_proofs, blob) = load_test_blobs_bundle_v2::()?; BlobsBundle { - commitments: vec![kzg_commitment; n_blobs].into(), + commitments: vec![kzg_commitment; n_blobs].try_into().unwrap(), proofs: vec![kzg_proofs.to_vec(); n_blobs] .into_iter() .flatten() .collect::>() - .into(), - blobs: vec![blob; n_blobs].into(), + .try_into() + .unwrap(), + blobs: vec![blob; n_blobs].try_into().unwrap(), } } else { let (kzg_commitment, kzg_proof, blob) = load_test_blobs_bundle_v1::()?; BlobsBundle { - commitments: vec![kzg_commitment; n_blobs].into(), - proofs: vec![kzg_proof; n_blobs].into(), - blobs: vec![blob; n_blobs].into(), + commitments: vec![kzg_commitment; n_blobs].try_into().unwrap(), + proofs: vec![kzg_proof; n_blobs].try_into().unwrap(), + blobs: vec![blob; n_blobs].try_into().unwrap(), } }; - Ok((bundle, transactions.into())) + Ok((bundle, transactions.try_into().unwrap())) } pub fn static_valid_tx() -> Result, String> { // This is a real transaction hex encoded, but we don't care about the contents of the transaction. 
- let transaction: EthersTransaction = serde_json::from_str( + let transaction: AlloyTransaction = serde_json::from_str( r#"{ "blockHash":"0x1d59ff54b1eb26b013ce3cb5fc9dab3705b415a67127a003c3e61eb445bb8df2", "blockNumber":"0x5daf3b", @@ -886,7 +854,8 @@ pub fn static_valid_tx() -> Result(transaction.into()).to_vec()) .map_err(|e| format!("Failed to convert transaction to SSZ: {:?}", e)) } @@ -940,12 +909,8 @@ pub fn generate_genesis_header( *header.transactions_root_mut() = empty_transactions_root; Some(header) } - ForkName::Gloas => { - let mut header = ExecutionPayloadHeader::Gloas(<_>::default()); - *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); - *header.transactions_root_mut() = empty_transactions_root; - Some(header) - } + // TODO(EIP-7732): need to look into this + ForkName::Gloas => None, } } @@ -1009,7 +974,6 @@ mod test { const TERMINAL_DIFFICULTY: u64 = 10; const TERMINAL_BLOCK: u64 = 10; const DIFFICULTY_INCREMENT: u64 = 1; - let spec = Arc::new(MainnetEthSpec::default_spec()); let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( Uint256::from(TERMINAL_DIFFICULTY), @@ -1020,7 +984,6 @@ mod test { None, None, None, - spec, None, ); diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 7a451beddb3..2168ed8961e 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -252,7 +252,7 @@ pub async fn handle_rpc( Some( ctx.execution_block_generator .write() - .new_payload(request.into()), + .new_payload(request.try_into().unwrap()), ) } else { None @@ -361,98 +361,107 @@ pub async fn handle_rpc( } match method { - ENGINE_GET_PAYLOAD_V1 => { - Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) + ENGINE_GET_PAYLOAD_V1 => Ok(serde_json::to_value( + JsonExecutionPayload::try_from(response).unwrap(), + ) + .unwrap()), + ENGINE_GET_PAYLOAD_V2 => { 
+ Ok(match JsonExecutionPayload::try_from(response).unwrap() { + JsonExecutionPayload::Bellatrix(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseBellatrix { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + }) + .unwrap() + } + JsonExecutionPayload::Capella(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseCapella { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + }) + .unwrap() + } + _ => unreachable!(), + }) } - ENGINE_GET_PAYLOAD_V2 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::Bellatrix(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseBellatrix { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - }) - .unwrap() - } - JsonExecutionPayload::Capella(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseCapella { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - }) - .unwrap() - } - _ => unreachable!(), - }), // From v3 onwards, we use the getPayload version only for the corresponding // ExecutionPayload version. So we return an error if the ExecutionPayload version // we get does not correspond to the getPayload version. - ENGINE_GET_PAYLOAD_V3 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::Deneb(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseDeneb { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V3 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? 
- .into(), - should_override_builder: false, - }) - .unwrap() - } - _ => unreachable!(), - }), - ENGINE_GET_PAYLOAD_V4 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::Electra(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseElectra { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V4 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? - .into(), - should_override_builder: false, - // TODO(electra): add EL requests in mock el - execution_requests: Default::default(), - }) - .unwrap() - } - _ => unreachable!(), - }), - ENGINE_GET_PAYLOAD_V5 => Ok(match JsonExecutionPayload::from(response) { - JsonExecutionPayload::Fulu(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseFulu { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V5 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? - .into(), - should_override_builder: false, - execution_requests: Default::default(), - }) - .unwrap() - } - JsonExecutionPayload::Gloas(execution_payload) => { - serde_json::to_value(JsonGetPayloadResponseGloas { - execution_payload, - block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), - blobs_bundle: maybe_blobs - .ok_or(( - "No blobs returned despite V5 Payload".to_string(), - GENERIC_ERROR_CODE, - ))? 
- .into(), - should_override_builder: false, - execution_requests: Default::default(), - }) - .unwrap() - } - _ => unreachable!(), - }), + ENGINE_GET_PAYLOAD_V3 => { + Ok(match JsonExecutionPayload::try_from(response).unwrap() { + JsonExecutionPayload::Deneb(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseDeneb { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V3 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + }) + .unwrap() + } + _ => unreachable!(), + }) + } + ENGINE_GET_PAYLOAD_V4 => { + Ok(match JsonExecutionPayload::try_from(response).unwrap() { + JsonExecutionPayload::Electra(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseElectra { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V4 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + // TODO(electra): add EL requests in mock el + execution_requests: Default::default(), + }) + .unwrap() + } + _ => unreachable!(), + }) + } + ENGINE_GET_PAYLOAD_V5 => { + Ok(match JsonExecutionPayload::try_from(response).unwrap() { + JsonExecutionPayload::Fulu(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseFulu { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V5 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? 
+ .into(), + should_override_builder: false, + execution_requests: Default::default(), + }) + .unwrap() + } + JsonExecutionPayload::Gloas(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseGloas { + execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V5 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + execution_requests: Default::default(), + }) + .unwrap() + } + _ => unreachable!(), + }) + } _ => unreachable!(), } } @@ -644,7 +653,8 @@ pub async fn handle_rpc( transactions: payload.transactions().clone(), withdrawals: payload.withdrawals().ok().cloned(), }; - let json_payload_body = JsonExecutionPayloadBodyV1::from(payload_body); + let json_payload_body: JsonExecutionPayloadBodyV1 = + payload_body.try_into().unwrap(); response.push(Some(json_payload_body)); } None => response.push(None), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 9967668a5f8..884aa9bf47a 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -1,6 +1,8 @@ use crate::test_utils::{DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_JWT_SECRET}; use crate::{Config, ExecutionLayer, PayloadAttributes, PayloadParameters}; +use bls::{PublicKeyBytes, SecretKey, Signature}; use bytes::Bytes; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::PublishBlockRequest; use eth2::types::{ BlobsBundle, BlockId, BroadcastValidation, EndpointVersion, EventKind, EventTopic, @@ -14,6 +16,7 @@ use fork_choice::ForkchoiceUpdateParameters; use parking_lot::RwLock; use sensitive_url::SensitiveUrl; use ssz::Encode; +use ssz_types::VariableList; use std::collections::HashMap; use std::fmt::Debug; use std::future::Future; @@ -25,17 +28,16 @@ use tempfile::NamedTempFile; use 
tokio_stream::StreamExt; use tracing::{debug, error, info, warn}; use tree_hash::TreeHash; +use types::ExecutionBlockHash; use types::builder_bid::{ BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, - BuilderBidFulu, BuilderBidGloas, SignedBuilderBid, + BuilderBidFulu, SignedBuilderBid, }; use types::{ Address, BeaconState, ChainSpec, Epoch, EthSpec, ExecPayload, ExecutionPayload, - ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, - ForkVersionedResponse, Hash256, PublicKeyBytes, Signature, SignedBlindedBeaconBlock, - SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, + ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, Hash256, + SignedBlindedBeaconBlock, SignedRoot, SignedValidatorRegistrationData, Slot, Uint256, }; -use types::{ExecutionBlockHash, SecretKey}; use warp::reply::{self, Reply}; use warp::{Filter, Rejection}; @@ -71,8 +73,8 @@ impl Operation { } } -pub fn mock_builder_extra_data() -> types::VariableList { - "mock_builder".as_bytes().to_vec().into() +pub fn mock_builder_extra_data() -> VariableList { + "mock_builder".as_bytes().to_vec().try_into().unwrap() } #[derive(Debug)] @@ -115,9 +117,6 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Fulu(header) => { header.fee_recipient = fee_recipient; } - ExecutionPayloadHeaderRefMut::Gloas(header) => { - header.fee_recipient = fee_recipient; - } } } @@ -138,9 +137,6 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Fulu(header) => { header.gas_limit = gas_limit; } - ExecutionPayloadHeaderRefMut::Gloas(header) => { - header.gas_limit = gas_limit; - } } } @@ -165,9 +161,6 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Fulu(header) => { header.parent_hash = ExecutionBlockHash::from_root(parent_hash); } - ExecutionPayloadHeaderRefMut::Gloas(header) => { - header.parent_hash = ExecutionBlockHash::from_root(parent_hash); - } } } @@ -188,9 +181,6 @@ impl BidStuff for 
BuilderBid { ExecutionPayloadHeaderRefMut::Fulu(header) => { header.prev_randao = prev_randao; } - ExecutionPayloadHeaderRefMut::Gloas(header) => { - header.prev_randao = prev_randao; - } } } @@ -211,9 +201,6 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Fulu(header) => { header.block_number = block_number; } - ExecutionPayloadHeaderRefMut::Gloas(header) => { - header.block_number = block_number; - } } } @@ -234,9 +221,6 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Fulu(header) => { header.timestamp = timestamp; } - ExecutionPayloadHeaderRefMut::Gloas(header) => { - header.timestamp = timestamp; - } } } @@ -257,9 +241,6 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Fulu(header) => { header.withdrawals_root = withdrawals_root; } - ExecutionPayloadHeaderRefMut::Gloas(header) => { - header.withdrawals_root = withdrawals_root; - } } } @@ -293,10 +274,6 @@ impl BidStuff for BuilderBid { header.extra_data = extra_data; header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); } - ExecutionPayloadHeaderRefMut::Gloas(header) => { - header.extra_data = extra_data; - header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); - } } } } @@ -494,8 +471,9 @@ impl MockBuilder { SignedBlindedBeaconBlock::Fulu(block) => { block.message.body.execution_payload.tree_hash_root() } - SignedBlindedBeaconBlock::Gloas(block) => { - block.message.body.execution_payload.tree_hash_root() + SignedBlindedBeaconBlock::Gloas(_) => { + // TODO(EIP7732) Check if this is how we want to do error handling for gloas + return Err("invalid fork".to_string()); } }; let block_hash = block @@ -611,18 +589,10 @@ impl MockBuilder { ) = payload_response.into(); match fork { - ForkName::Gloas => BuilderBid::Gloas(BuilderBidGloas { - header: payload - .as_gloas() - .map_err(|_| "incorrect payload variant".to_string())? 
- .into(), - blob_kzg_commitments: maybe_blobs_bundle - .map(|b| b.commitments.clone()) - .unwrap_or_default(), - value: self.get_bid_value(value), - pubkey: self.builder_sk.public_key().compress(), - execution_requests: maybe_requests.unwrap_or_default(), - }), + ForkName::Gloas => { + // TODO(EIP7732) Check if this is how we want to do error handling for gloas + return Err("invalid fork".to_string()); + } ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { header: payload .as_fulu() diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 9e587d4e590..c69edb8f397 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -5,9 +5,10 @@ use crate::{ *, }; use alloy_primitives::B256 as H256; +use fixed_bytes::FixedBytesExtended; use kzg::Kzg; use tempfile::NamedTempFile; -use types::{FixedBytesExtended, MainnetEthSpec}; +use types::MainnetEthSpec; pub struct MockExecutionLayer { pub server: MockServer, @@ -63,7 +64,6 @@ impl MockExecutionLayer { prague_time, osaka_time, amsterdam_time, - spec.clone(), kzg, ); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 712c773dda0..8f129715606 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -22,7 +22,7 @@ use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::{Arc, LazyLock}; use tokio::{runtime, sync::oneshot}; use tracing::info; -use types::{ChainSpec, EthSpec, ExecutionBlockHash, Uint256}; +use types::{EthSpec, ExecutionBlockHash, Uint256}; use warp::{Filter, Rejection, http::StatusCode}; use crate::EngineCapabilities; @@ -114,7 +114,7 @@ pub struct MockServer { } impl MockServer { - pub fn unit_testing(chain_spec: Arc) -> Self { + pub fn unit_testing() -> Self { Self::new( 
&runtime::Handle::current(), JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), @@ -126,7 +126,6 @@ impl MockServer { None, // FIXME(electra): should this be the default? None, // FIXME(fulu): should this be the default? None, // FIXME(gloas): should this be the default? - chain_spec, None, ) } @@ -134,7 +133,6 @@ impl MockServer { pub fn new_with_config( handle: &runtime::Handle, config: MockExecutionConfig, - spec: Arc, kzg: Option>, ) -> Self { create_test_tracing_subscriber(); @@ -161,7 +159,6 @@ impl MockServer { prague_time, osaka_time, amsterdam_time, - spec, kzg, ); @@ -226,7 +223,6 @@ impl MockServer { prague_time: Option, osaka_time: Option, amsterdam_time: Option, - spec: Arc, kzg: Option>, ) -> Self { Self::new_with_config( @@ -243,7 +239,6 @@ impl MockServer { osaka_time, amsterdam_time, }, - spec, kzg, ) } diff --git a/beacon_node/execution_layer/src/versioned_hashes.rs b/beacon_node/execution_layer/src/versioned_hashes.rs index 97c3100de99..21cfd5a3223 100644 --- a/beacon_node/execution_layer/src/versioned_hashes.rs +++ b/beacon_node/execution_layer/src/versioned_hashes.rs @@ -1,6 +1,7 @@ use alloy_consensus::TxEnvelope; use alloy_rlp::Decodable; -use types::{EthSpec, ExecutionPayloadRef, Hash256, Unsigned, VersionedHash}; +use typenum::Unsigned; +use types::{EthSpec, ExecutionPayloadRef, Hash256, VersionedHash}; #[derive(Debug)] pub enum Error { diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index 8f6f3516fc5..124231a57e5 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] +bls = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } int_to_bytes = { workspace = true } diff --git a/beacon_node/genesis/src/common.rs b/beacon_node/genesis/src/common.rs index e48fa362046..88a88810d8a 100644 --- a/beacon_node/genesis/src/common.rs +++ 
b/beacon_node/genesis/src/common.rs @@ -37,10 +37,17 @@ pub fn genesis_deposits( proofs.push(proof); } - Ok(deposit_data + deposit_data .into_iter() .zip(proofs) - .map(|(data, proof)| (data, proof.into())) - .map(|(data, proof)| Deposit { proof, data }) - .collect()) + .map(|(data, proof)| { + let converted_proof = proof + .try_into() + .map_err(|e| format!("Error converting proof: {:?}", e))?; + Ok(Deposit { + proof: converted_proof, + data, + }) + }) + .collect() } diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index dfa4daab9ae..349b8f19c8b 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -1,12 +1,10 @@ use crate::common::genesis_deposits; +use bls::{Keypair, PublicKey, Signature}; use ethereum_hashing::hash; use rayon::prelude::*; use ssz::Encode; use state_processing::initialize_beacon_state_from_eth1; -use types::{ - BeaconState, ChainSpec, DepositData, EthSpec, ExecutionPayloadHeader, Hash256, Keypair, - PublicKey, Signature, -}; +use types::{BeaconState, ChainSpec, DepositData, EthSpec, ExecutionPayloadHeader, Hash256}; pub const DEFAULT_ETH1_BLOCK_HASH: &[u8] = &[0x42; 32]; diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 7dd0d0223f4..571dab10273 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -8,14 +8,17 @@ autotests = false # using a single test binary com [dependencies] beacon_chain = { workspace = true } beacon_processor = { workspace = true } +bls = { workspace = true } bs58 = "0.4.0" bytes = { workspace = true } +context_deserialize = { workspace = true } directory = { workspace = true } either = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } futures = { workspace = true } health_metrics 
= { workspace = true } hex = { workspace = true } diff --git a/beacon_node/http_api/src/beacon/mod.rs b/beacon_node/http_api/src/beacon/mod.rs new file mode 100644 index 00000000000..df5e6eee5cb --- /dev/null +++ b/beacon_node/http_api/src/beacon/mod.rs @@ -0,0 +1,2 @@ +pub mod pool; +pub mod states; diff --git a/beacon_node/http_api/src/beacon/pool.rs b/beacon_node/http_api/src/beacon/pool.rs new file mode 100644 index 00000000000..059573c3175 --- /dev/null +++ b/beacon_node/http_api/src/beacon/pool.rs @@ -0,0 +1,522 @@ +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::{NetworkTxFilter, OptionalConsensusVersionHeaderFilter, ResponseFilter}; +use crate::version::{ + ResponseIncludesVersion, V1, V2, add_consensus_version_header, beacon_response, + unsupported_version_rejection, +}; +use crate::{sync_committees, utils}; +use beacon_chain::observed_operations::ObservationOutcome; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{AttestationPoolQuery, EndpointVersion, Failure, GenericResponse}; +use lighthouse_network::PubsubMessage; +use network::NetworkMessage; +use operation_pool::ReceivedPreCapella; +use slot_clock::SlotClock; +use std::collections::HashSet; +use std::sync::Arc; +use tokio::sync::mpsc::UnboundedSender; +use tracing::{debug, info, warn}; +use types::{ + Attestation, AttestationData, AttesterSlashing, ForkName, ProposerSlashing, + SignedBlsToExecutionChange, SignedVoluntaryExit, SingleAttestation, SyncCommitteeMessage, +}; +use warp::filters::BoxedFilter; +use warp::{Filter, Reply}; +use warp_utils::reject::convert_rejection; + +pub type BeaconPoolPathFilter = BoxedFilter<( + TaskSpawner<::EthSpec>, + Arc>, +)>; +pub type BeaconPoolPathV2Filter = BoxedFilter<( + TaskSpawner<::EthSpec>, + Arc>, +)>; +pub type BeaconPoolPathAnyFilter = BoxedFilter<( + EndpointVersion, + TaskSpawner<::EthSpec>, + Arc>, +)>; + +/// POST beacon/pool/bls_to_execution_changes +pub fn post_beacon_pool_bls_to_execution_changes( + 
network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + address_changes: Vec, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let mut failures = vec![]; + + for (index, address_change) in address_changes.into_iter().enumerate() { + let validator_index = address_change.message.validator_index; + + match chain.verify_bls_to_execution_change_for_http_api(address_change) { + Ok(ObservationOutcome::New(verified_address_change)) => { + let validator_index = + verified_address_change.as_inner().message.validator_index; + let address = verified_address_change + .as_inner() + .message + .to_execution_address; + + // New to P2P *and* op pool, gossip immediately if post-Capella. + let received_pre_capella = + if chain.current_slot_is_post_capella().unwrap_or(false) { + ReceivedPreCapella::No + } else { + ReceivedPreCapella::Yes + }; + if matches!(received_pre_capella, ReceivedPreCapella::No) { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::BlsToExecutionChange(Box::new( + verified_address_change.as_inner().clone(), + )), + )?; + } + + // Import to op pool (may return `false` if there's a race). 
+ let imported = chain.import_bls_to_execution_change( + verified_address_change, + received_pre_capella, + ); + + info!( + %validator_index, + ?address, + published = + matches!(received_pre_capella, ReceivedPreCapella::No), + imported, + "Processed BLS to execution change" + ); + } + Ok(ObservationOutcome::AlreadyKnown) => { + debug!(%validator_index, "BLS to execution change already known"); + } + Err(e) => { + warn!( + validator_index, + reason = ?e, + source = "HTTP", + "Invalid BLS to execution change" + ); + failures.push(Failure::new(index, format!("invalid: {e:?}"))); + } + } + } + + if failures.is_empty() { + Ok(()) + } else { + Err(warp_utils::reject::indexed_bad_request( + "some BLS to execution changes failed to verify".into(), + failures, + )) + } + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/bls_to_execution_changes +pub fn get_beacon_pool_bls_to_execution_changes( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("bls_to_execution_changes")) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); + Ok(GenericResponse::from(address_changes)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/sync_committees +pub fn post_beacon_pool_sync_committees( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + signatures: Vec, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + sync_committees::process_sync_committee_signatures( + signatures, network_tx, &chain, + )?; + Ok(GenericResponse::from(())) + }) + }, + ) + .boxed() +} + +/// 
GET beacon/pool/voluntary_exits +pub fn get_beacon_pool_voluntary_exits( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_voluntary_exits(); + Ok(GenericResponse::from(attestations)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/voluntary_exits +pub fn post_beacon_pool_voluntary_exits( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("voluntary_exits")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + exit: SignedVoluntaryExit, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + .verify_voluntary_exit_for_gossip(exit.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. 
+ chain + .validator_monitor + .read() + .register_api_voluntary_exit(&exit.message); + + if let ObservationOutcome::New(exit) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), + )?; + + chain.import_voluntary_exit(exit); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/proposer_slashings +pub fn get_beacon_pool_proposer_slashings( + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestations = chain.op_pool.get_all_proposer_slashings(); + Ok(GenericResponse::from(attestations)) + }) + }, + ) + .boxed() +} + +/// POST beacon/pool/proposer_slashings +pub fn post_beacon_pool_proposer_slashings( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path: &BeaconPoolPathFilter, +) -> ResponseFilter { + beacon_pool_path + .clone() + .and(warp::path("proposer_slashings")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + slashing: ProposerSlashing, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + .verify_proposer_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. 
+ chain + .validator_monitor + .read() + .register_api_proposer_slashing(&slashing); + + if let ObservationOutcome::New(slashing) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::ProposerSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_proposer_slashing(slashing); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/attester_slashings +pub fn get_beacon_pool_attester_slashings( + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .then( + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let slashings = chain.op_pool.get_all_attester_slashings(); + + // Use the current slot to find the fork version, and convert all messages to the + // current fork's format. This is to ensure consistent message types matching + // `Eth-Consensus-Version`. 
+ let current_slot = + chain + .slot_clock + .now() + .ok_or(warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ))?; + let fork_name = chain.spec.fork_name_at_slot::(current_slot); + let slashings = slashings + .into_iter() + .filter(|slashing| { + (fork_name.electra_enabled() + && matches!(slashing, AttesterSlashing::Electra(_))) + || (!fork_name.electra_enabled() + && matches!(slashing, AttesterSlashing::Base(_))) + }) + .collect::>(); + + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = beacon_response(require_version, &slashings); + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }) + }, + ) + .boxed() +} + +// POST beacon/pool/attester_slashings +pub fn post_beacon_pool_attester_slashings( + network_tx_filter: &NetworkTxFilter, + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attester_slashings")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. + |_endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>, + slashing: AttesterSlashing, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let outcome = chain + .verify_attester_slashing_for_gossip(slashing.clone()) + .map_err(|e| { + warp_utils::reject::object_invalid(format!( + "gossip verification failed: {:?}", + e + )) + })?; + + // Notify the validator monitor. 
+ chain + .validator_monitor + .read() + .register_api_attester_slashing(slashing.to_ref()); + + if let ObservationOutcome::New(slashing) = outcome { + utils::publish_pubsub_message( + &network_tx, + PubsubMessage::AttesterSlashing(Box::new( + slashing.clone().into_inner(), + )), + )?; + + chain.import_attester_slashing(slashing); + } + + Ok(()) + }) + }, + ) + .boxed() +} + +/// GET beacon/pool/attestations?committee_index,slot +pub fn get_beacon_pool_attestations( + beacon_pool_path_any: &BeaconPoolPathAnyFilter, +) -> ResponseFilter { + beacon_pool_path_any + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp::query::()) + .then( + |endpoint_version: EndpointVersion, + task_spawner: TaskSpawner, + chain: Arc>, + query: AttestationPoolQuery| { + task_spawner.blocking_response_task(Priority::P1, move || { + let query_filter = |data: &AttestationData, committee_indices: HashSet| { + query.slot.is_none_or(|slot| slot == data.slot) + && query + .committee_index + .is_none_or(|index| committee_indices.contains(&index)) + }; + + let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); + attestations.extend( + chain + .naive_aggregation_pool + .read() + .iter() + .filter(|&att| { + query_filter(att.data(), att.get_committee_indices_map()) + }) + .cloned(), + ); + // Use the current slot to find the fork version, and convert all messages to the + // current fork's format. This is to ensure consistent message types matching + // `Eth-Consensus-Version`. 
+ let current_slot = + chain + .slot_clock + .now() + .ok_or(warp_utils::reject::custom_server_error( + "unable to read slot clock".to_string(), + ))?; + let fork_name = chain.spec.fork_name_at_slot::(current_slot); + let attestations = attestations + .into_iter() + .filter(|att| { + (fork_name.electra_enabled() && matches!(att, Attestation::Electra(_))) + || (!fork_name.electra_enabled() + && matches!(att, Attestation::Base(_))) + }) + .collect::>(); + + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; + + let res = beacon_response(require_version, &attestations); + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }) + }, + ) + .boxed() +} + +pub fn post_beacon_pool_attestations_v2( + network_tx_filter: &NetworkTxFilter, + optional_consensus_version_header_filter: OptionalConsensusVersionHeaderFilter, + beacon_pool_path_v2: &BeaconPoolPathV2Filter, +) -> ResponseFilter { + beacon_pool_path_v2 + .clone() + .and(warp::path("attestations")) + .and(warp::path::end()) + .and(warp_utils::json::json::>()) + .and(optional_consensus_version_header_filter) + .and(network_tx_filter.clone()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + attestations: Vec, + _fork_name: Option, + network_tx: UnboundedSender>| async move { + let result = crate::publish_attestations::publish_attestations( + task_spawner, + chain, + attestations, + network_tx, + true, + ) + .await + .map(|()| warp::reply::json(&())); + convert_rejection(result).await + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/beacon/states.rs b/beacon_node/http_api/src/beacon/states.rs new file mode 100644 index 00000000000..6d06bcc77d6 --- /dev/null +++ b/beacon_node/http_api/src/beacon/states.rs @@ -0,0 +1,787 @@ +use crate::StateId; +use crate::task_spawner::{Priority, TaskSpawner}; +use 
crate::utils::ResponseFilter; +use crate::validator::pubkey_to_validator_index; +use crate::version::{ + ResponseIncludesVersion, add_consensus_version_header, + execution_optimistic_finalized_beacon_response, +}; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::types::{ + ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, + ValidatorsRequestBody, +}; +use std::sync::Arc; +use types::{ + AttestationShufflingId, CommitteeCache, Error as BeaconStateError, EthSpec, RelativeEpoch, +}; +use warp::filters::BoxedFilter; +use warp::{Filter, Reply}; +use warp_utils::query::multi_key_query; + +type BeaconStatesPath = BoxedFilter<( + StateId, + TaskSpawner<::EthSpec>, + Arc>, +)>; + +// GET beacon/states/{state_id}/pending_consolidations +pub fn get_beacon_state_pending_consolidations( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_consolidations")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(consolidations) = state.pending_consolidations() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending consolidations not found".to_string(), + )); + }; + + Ok(( + consolidations.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/pending_partial_withdrawals +pub fn 
get_beacon_state_pending_partial_withdrawals( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_partial_withdrawals")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(withdrawals) = state.pending_partial_withdrawals() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending withdrawals not found".to_string(), + )); + }; + + Ok(( + withdrawals.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/pending_deposits +pub fn get_beacon_state_pending_deposits( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("pending_deposits")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (data, execution_optimistic, finalized, fork_name) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let Ok(deposits) = state.pending_deposits() else { + return Err(warp_utils::reject::custom_bad_request( + "Pending deposits not found".to_string(), + )); + }; + + Ok(( + deposits.clone(), + execution_optimistic, + finalized, + state.fork_name_unchecked(), + )) + }, + )?; + + 
execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + data, + ) + .map(|res| warp::reply::json(&res).into_response()) + .map(|resp| add_consensus_version_header(resp, fork_name)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/randao?epoch +pub fn get_beacon_state_randao( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("randao")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::RandaoQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (randao, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); + let randao = *state.get_randao_mix(epoch).map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "epoch out of range: {e:?}" + )) + })?; + Ok((randao, execution_optimistic, finalized)) + }, + )?; + + Ok( + eth2::types::GenericResponse::from(eth2::types::RandaoMix { randao }) + .add_execution_optimistic_finalized(execution_optimistic, finalized), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/sync_committees?epoch +pub fn get_beacon_state_sync_committees( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("sync_committees")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::SyncCommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (sync_committee, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let current_epoch = 
state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + Ok(( + state + .get_built_sync_committee(epoch, &chain.spec) + .cloned() + .map_err(|e| match e { + BeaconStateError::SyncCommitteeNotKnown { .. } => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} has no \ + sync committee for epoch {}", + current_epoch, epoch + )) + } + BeaconStateError::IncorrectStateVariant => { + warp_utils::reject::custom_bad_request(format!( + "state at epoch {} is not activated for Altair", + current_epoch, + )) + } + e => warp_utils::reject::beacon_state_error(e), + })?, + execution_optimistic, + finalized, + )) + }, + )?; + + let validators = chain + .validator_indices(sync_committee.pubkeys.iter()) + .map_err(warp_utils::reject::unhandled_error)?; + + let validator_aggregates = validators + .chunks_exact(T::EthSpec::sync_subcommittee_size()) + .map(|indices| eth2::types::SyncSubcommittee { + indices: indices.to_vec(), + }) + .collect(); + + let response = eth2::types::SyncCommitteeByValidatorIndices { + validators, + validator_aggregates, + }; + + Ok(eth2::types::GenericResponse::from(response) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/committees?slot,index,epoch +pub fn get_beacon_state_committees( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("committees")) + .and(warp::query::()) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: eth2::types::CommitteesQuery| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let current_epoch = state.current_epoch(); + let epoch = query.epoch.unwrap_or(current_epoch); + + // Attempt to obtain the committee_cache 
from the beacon chain + let decision_slot = (epoch.saturating_sub(2u64)) + .end_slot(T::EthSpec::slots_per_epoch()); + // Find the decision block and skip to another method on any kind + // of failure + let shuffling_id = if let Ok(Some(shuffling_decision_block)) = + chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) + { + Some(AttestationShufflingId { + shuffling_epoch: epoch, + shuffling_decision_block, + }) + } else { + None + }; + + // Attempt to read from the chain cache if there exists a + // shuffling_id + let maybe_cached_shuffling = if let Some(shuffling_id) = + shuffling_id.as_ref() + { + chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + .and_then(|mut cache_write| cache_write.get(shuffling_id)) + .and_then(|cache_item| cache_item.wait().ok()) + } else { + None + }; + + let committee_cache = + if let Some(shuffling) = maybe_cached_shuffling { + shuffling + } else { + let possibly_built_cache = + match RelativeEpoch::from_epoch(current_epoch, epoch) { + Ok(relative_epoch) + if state.committee_cache_is_initialized( + relative_epoch, + ) => + { + state.committee_cache(relative_epoch).cloned() + } + _ => CommitteeCache::initialized( + state, + epoch, + &chain.spec, + ), + } + .map_err( + |e| match e { + BeaconStateError::EpochOutOfBounds => { + let max_sprp = + T::EthSpec::slots_per_historical_root() + as u64; + let first_subsequent_restore_point_slot = + ((epoch.start_slot( + T::EthSpec::slots_per_epoch(), + ) / max_sprp) + + 1) + * max_sprp; + if epoch < current_epoch { + warp_utils::reject::custom_bad_request( + format!( + "epoch out of bounds, \ + try state at slot {}", + first_subsequent_restore_point_slot, + ), + ) + } else { + warp_utils::reject::custom_bad_request( + "epoch out of bounds, \ + too far in future" + .into(), + ) + } + } + _ => warp_utils::reject::unhandled_error( + BeaconChainError::from(e), + ), + }, + )?; + + // Attempt to write to the beacon cache (only if the cache + // size is not the default 
value). + if chain.config.shuffling_cache_size + != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE + && let Some(shuffling_id) = shuffling_id + && let Some(mut cache_write) = chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(1)) + { + cache_write.insert_committee_cache( + shuffling_id, + &possibly_built_cache, + ); + } + + possibly_built_cache + }; + + // Use either the supplied slot or all slots in the epoch. + let slots = + query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { + epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() + }); + + // Use either the supplied committee index or all available indices. + let indices = + query.index.map(|index| vec![index]).unwrap_or_else(|| { + (0..committee_cache.committees_per_slot()).collect() + }); + + let mut response = Vec::with_capacity(slots.len() * indices.len()); + + for slot in slots { + // It is not acceptable to query with a slot that is not within the + // specified epoch. + if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { + return Err(warp_utils::reject::custom_bad_request( + format!("{} is not in epoch {}", slot, epoch), + )); + } + + for &index in &indices { + let committee = committee_cache + .get_beacon_committee(slot, index) + .ok_or_else(|| { + warp_utils::reject::custom_bad_request(format!( + "committee index {} does not exist in epoch {}", + index, epoch + )) + })?; + + response.push(eth2::types::CommitteeData { + index, + slot, + validators: committee + .committee + .iter() + .map(|i| *i as u64) + .collect(), + }); + } + } + + Ok((response, execution_optimistic, finalized)) + }, + )?; + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validators/{validator_id} +pub fn get_beacon_state_validators_id( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + 
.and(warp::path("validators")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid validator ID".to_string(), + )) + })) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + validator_id: ValidatorId| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let index_opt = match &validator_id { + ValidatorId::PublicKey(pubkey) => pubkey_to_validator_index( + &chain, state, pubkey, + ) + .map_err(|e| { + warp_utils::reject::custom_not_found(format!( + "unable to access pubkey cache: {e:?}", + )) + })?, + ValidatorId::Index(index) => Some(*index as usize), + }; + + Ok(( + index_opt + .and_then(|index| { + let validator = state.validators().get(index)?; + let balance = *state.balances().get(index)?; + let epoch = state.current_epoch(); + let far_future_epoch = chain.spec.far_future_epoch; + + Some(eth2::types::ValidatorData { + index: index as u64, + balance, + status: + eth2::types::ValidatorStatus::from_validator( + validator, + epoch, + far_future_epoch, + ), + validator: validator.clone(), + }) + }) + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "unknown validator: {}", + validator_id + )) + })?, + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validators +pub fn post_beacon_state_validators( + 
beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorsRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.ids, + &query.statuses, + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validators?id,status +pub fn get_beacon_state_validators( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validators")) + .and(warp::path::end()) + .and(multi_key_query::()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query_res: Result| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. 
+ let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let query = query_res?; + crate::validators::get_beacon_state_validators( + state_id, + chain, + &query.id, + &query.status, + ) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validator_identities +pub fn post_beacon_state_validator_identities( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validator_identities")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorIdentitiesRequestBody| { + // Prioritise requests for validators at the head. These should be fast to service + // and could be required by the validator client. + let priority = if let StateId(eth2::types::StateId::Head) = state_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + crate::validators::get_beacon_state_validator_identities( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ) + .boxed() +} + +// POST beacon/states/{state_id}/validator_balances +pub fn post_beacon_state_validator_balances( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(warp_utils::json::json_no_body()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query: ValidatorBalancesRequestBody| { + task_spawner.blocking_json_task(Priority::P1, move || { + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + Some(&query.ids), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/validator_balances?id +pub fn get_beacon_state_validator_balances( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + 
.clone() + .and(warp::path("validator_balances")) + .and(warp::path::end()) + .and(multi_key_query::()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>, + query_res: Result| { + task_spawner.blocking_json_task(Priority::P1, move || { + let query = query_res?; + crate::validators::get_beacon_state_validator_balances( + state_id, + chain, + query.id.as_deref(), + ) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/finality_checkpoints +pub fn get_beacon_state_finality_checkpoints( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("finality_checkpoints")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (data, execution_optimistic, finalized) = state_id + .map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + Ok(( + eth2::types::FinalityCheckpointsData { + previous_justified: state.previous_justified_checkpoint(), + current_justified: state.current_justified_checkpoint(), + finalized: state.finalized_checkpoint(), + }, + execution_optimistic, + finalized, + )) + }, + )?; + + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/fork +pub fn get_beacon_state_fork( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .clone() + .and(warp::path("fork")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (fork, execution_optimistic, finalized) = + state_id.fork_and_execution_optimistic_and_finalized(&chain)?; + Ok(eth2::types::ExecutionOptimisticFinalizedResponse { + data: fork, + execution_optimistic: 
Some(execution_optimistic), + finalized: Some(finalized), + }) + }) + }, + ) + .boxed() +} + +// GET beacon/states/{state_id}/root +pub fn get_beacon_state_root( + beacon_states_path: BeaconStatesPath, +) -> ResponseFilter { + beacon_states_path + .and(warp::path("root")) + .and(warp::path::end()) + .then( + |state_id: StateId, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (root, execution_optimistic, finalized) = state_id.root(&chain)?; + Ok(eth2::types::GenericResponse::from( + eth2::types::RootData::from(root), + )) + .map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) + }) + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index 778067c32bb..ea8b47f91ef 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -2,16 +2,17 @@ use crate::version::inconsistent_fork_rejection; use crate::{ExecutionOptimistic, state_id::checkpoint_slot_and_execution_optimistic}; use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; +use eth2::beacon_response::{ExecutionOptimisticFinalizedMetadata, UnversionedResponse}; use eth2::types::BlockId as CoreBlockId; use eth2::types::DataColumnIndicesQuery; use eth2::types::{BlobIndicesQuery, BlobWrapper, BlobsVersionedHashesQuery}; +use fixed_bytes::FixedBytesExtended; use std::fmt; use std::str::FromStr; use std::sync::Arc; use types::{ - BlobSidecarList, DataColumnSidecarList, EthSpec, FixedBytesExtended, ForkName, Hash256, - SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, UnversionedResponse, - beacon_response::ExecutionOptimisticFinalizedMetadata, + BlobSidecarList, DataColumnSidecarList, EthSpec, ForkName, Hash256, SignedBeaconBlock, + SignedBlindedBeaconBlock, Slot, }; use warp::Rejection; @@ -474,7 +475,7 @@ impl BlockId { ) .collect::, _>>()?; - 
reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block, &chain.spec).map_err( + reconstruct_blobs(&chain.kzg, data_columns, blob_indices, block, &chain.spec).map_err( |e| { warp_utils::reject::custom_server_error(format!( "Error reconstructing data columns: {e:?}" diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 9026792b911..58cd2a3bdbc 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -8,6 +8,7 @@ mod aggregate_attestation; mod attestation_performance; mod attester_duties; +mod beacon; mod block_id; mod block_packing_efficiency; mod block_rewards; @@ -29,38 +30,41 @@ mod sync_committees; mod task_spawner; pub mod test_utils; mod ui; +mod utils; mod validator; mod validator_inclusion; mod validators; mod version; + +use crate::beacon::pool::*; use crate::light_client::{get_light_client_bootstrap, get_light_client_updates}; -use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; +use crate::utils::{AnyVersionFilter, EthV1Filter}; +use crate::validator::post_validator_liveness_epoch; +use crate::validator::*; use crate::version::beacon_response; -use beacon_chain::{ - AttestationError as AttnError, BeaconChain, BeaconChainError, BeaconChainTypes, - WhenSlotSkipped, attestation_verification::VerifiedAttestation, - observed_operations::ObservationOutcome, validator_monitor::timestamp_now, -}; +use beacon::states; +use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use beacon_processor::BeaconProcessorSend; pub use block_id::BlockId; use builder_states::get_next_withdrawals; use bytes::Bytes; +use context_deserialize::ContextDeserialize; use directory::DEFAULT_ROOT_DIR; +use eth2::StatusCode; +use eth2::lighthouse::sync_state::SyncState; use eth2::types::{ - self as api_types, BroadcastValidation, ContextDeserialize, EndpointVersion, ForkChoice, - ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, StateId as 
CoreStateId, - ValidatorBalancesRequestBody, ValidatorId, ValidatorIdentitiesRequestBody, ValidatorStatus, - ValidatorsRequestBody, + self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceExtraData, + ForkChoiceNode, LightClientUpdatesQuery, PublishBlockRequest, ValidatorId, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use health_metrics::observe::Observe; -use lighthouse_network::rpc::methods::MetaData; -use lighthouse_network::{Enr, NetworkGlobals, PeerId, PubsubMessage, types::SyncState}; +use lighthouse_network::Enr; +use lighthouse_network::NetworkGlobals; +use lighthouse_network::PeerId; use lighthouse_version::version_with_platform; use logging::{SSELoggingComponents, crit}; -use network::{NetworkMessage, NetworkSenders, ValidatorSubscriptionMessage}; +use network::{NetworkMessage, NetworkSenders}; use network_utils::enr_ext::EnrExt; -use operation_pool::ReceivedPreCapella; use parking_lot::RwLock; pub use publish_blocks::{ ProvenancedBlock, publish_blinded_block, publish_block, reconstruct_block, @@ -69,7 +73,6 @@ use serde::{Deserialize, Serialize}; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; -use std::collections::HashSet; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; @@ -79,35 +82,26 @@ use std::sync::Arc; use sysinfo::{System, SystemExt}; use system_health::{observe_nat, observe_system_health_bn}; use task_spawner::{Priority, TaskSpawner}; -use tokio::sync::{ - mpsc::{Sender, UnboundedSender}, - oneshot, -}; +use tokio::sync::mpsc::UnboundedSender; use tokio_stream::{ StreamExt, wrappers::{BroadcastStream, errors::BroadcastStreamRecvError}, }; -use tracing::{debug, error, info, warn}; +use tracing::{debug, info, warn}; use types::{ - Attestation, AttestationData, AttestationShufflingId, AttesterSlashing, BeaconStateError, - ChainSpec, Checkpoint, CommitteeCache, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, 
- ProposerPreparationData, ProposerSlashing, RelativeEpoch, SignedAggregateAndProof, - SignedBlindedBeaconBlock, SignedBlsToExecutionChange, SignedContributionAndProof, - SignedValidatorRegistrationData, SignedVoluntaryExit, SingleAttestation, Slot, - SyncCommitteeMessage, SyncContributionData, + BeaconStateError, Checkpoint, ConfigAndPreset, Epoch, EthSpec, ForkName, Hash256, + SignedBlindedBeaconBlock, Slot, }; -use validator::pubkey_to_validator_index; use version::{ - ResponseIncludesVersion, V1, V2, V3, add_consensus_version_header, add_ssz_content_type_header, + ResponseIncludesVersion, V1, V2, add_consensus_version_header, add_ssz_content_type_header, execution_optimistic_finalized_beacon_response, inconsistent_fork_rejection, unsupported_version_rejection, }; use warp::Reply; -use warp::http::StatusCode; use warp::hyper::Body; use warp::sse::Event; use warp::{Filter, Rejection, http::Response}; -use warp_utils::{query::multi_key_query, reject::convert_rejection, uor::UnifyingOrFilter}; +use warp_utils::{query::multi_key_query, uor::UnifyingOrFilter}; const API_PREFIX: &str = "eth"; @@ -359,16 +353,18 @@ pub fn serve( } // Create a filter that extracts the endpoint version. - let any_version = warp::path(API_PREFIX).and(warp::path::param::().or_else( - |_| async move { - Err(warp_utils::reject::custom_bad_request( - "Invalid version identifier".to_string(), - )) - }, - )); + let any_version = warp::path(API_PREFIX) + .and( + warp::path::param::().or_else(|_| async move { + Err(warp_utils::reject::custom_bad_request( + "Invalid version identifier".to_string(), + )) + }), + ) + .boxed(); // Filter that enforces a single endpoint version and then discards the `EndpointVersion`. 
- let single_version = |reqd: EndpointVersion| { + fn single_version(any_version: AnyVersionFilter, reqd: EndpointVersion) -> EthV1Filter { any_version .and_then(move |version| async move { if version == reqd { @@ -378,10 +374,11 @@ pub fn serve( } }) .untuple_one() - }; + .boxed() + } - let eth_v1 = single_version(V1); - let eth_v2 = single_version(V2); + let eth_v1 = single_version(any_version.clone(), V1); + let eth_v2 = single_version(any_version.clone(), V2); // Create a `warp` filter that provides access to the network globals. let inner_network_globals = ctx.network_globals.clone(); @@ -402,34 +399,34 @@ pub fn serve( // Create a `warp` filter that provides access to the beacon chain. let inner_ctx = ctx.clone(); - let chain_filter = - warp::any() - .map(move || inner_ctx.chain.clone()) - .and_then(|chain| async move { - match chain { - Some(chain) => Ok(chain), - None => Err(warp_utils::reject::custom_not_found( - "Beacon chain genesis has not yet been observed.".to_string(), - )), - } - }); + let chain_filter = warp::any() + .map(move || inner_ctx.chain.clone()) + .and_then(|chain| async move { + match chain { + Some(chain) => Ok(chain), + None => Err(warp_utils::reject::custom_not_found( + "Beacon chain genesis has not yet been observed.".to_string(), + )), + } + }) + .boxed(); // Create a `warp` filter that provides access to the network sender channel. 
let network_tx = ctx .network_senders .as_ref() .map(|senders| senders.network_send()); - let network_tx_filter = - warp::any() - .map(move || network_tx.clone()) - .and_then(|network_tx| async move { - match network_tx { - Some(network_tx) => Ok(network_tx), - None => Err(warp_utils::reject::custom_not_found( - "The networking stack has not yet started (network_tx).".to_string(), - )), - } - }); + let network_tx_filter = warp::any() + .map(move || network_tx.clone()) + .and_then(|network_tx| async move { + match network_tx { + Some(network_tx) => Ok(network_tx), + None => Err(warp_utils::reject::custom_not_found( + "The networking stack has not yet started (network_tx).".to_string(), + )), + } + }) + .boxed(); // Create a `warp` filter that provides access to the network attestation subscription channel. let validator_subscriptions_tx = ctx @@ -446,7 +443,8 @@ pub fn serve( .to_string(), )), } - }); + }) + .boxed(); // Create a `warp` filter that rejects requests whilst the node is syncing. let not_while_syncing_filter = @@ -486,7 +484,8 @@ pub fn serve( SyncState::Stalled => Ok(()), } }, - ); + ) + .boxed(); // Create a `warp` filter that returns 404s if the light client server is disabled. 
let light_client_server_filter = @@ -539,8 +538,9 @@ pub fn serve( .beacon_processor_send .clone() .filter(|_| config.enable_beacon_processor); - let task_spawner_filter = - warp::any().map(move || TaskSpawner::new(beacon_processor_send.clone())); + let task_spawner_filter = warp::any() + .map(move || TaskSpawner::new(beacon_processor_send.clone())) + .boxed(); let duplicate_block_status_code = ctx.config.duplicate_block_status_code; @@ -552,6 +552,7 @@ pub fn serve( // GET beacon/genesis let get_beacon_genesis = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("genesis")) .and(warp::path::end()) @@ -575,6 +576,7 @@ pub fn serve( */ let beacon_states_path = eth_v1 + .clone() .and(warp::path("beacon")) .and(warp::path("states")) .and(warp::path::param::().or_else(|_| async { @@ -583,1190 +585,1082 @@ pub fn serve( )) })) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); + .and(chain_filter.clone()) + .boxed(); // GET beacon/states/{state_id}/root - let get_beacon_state_root = beacon_states_path - .clone() - .and(warp::path("root")) - .and(warp::path::end()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (root, execution_optimistic, finalized) = state_id.root(&chain)?; - Ok(api_types::GenericResponse::from(api_types::RootData::from( - root, - ))) - .map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }, - ); + let get_beacon_state_root = states::get_beacon_state_root(beacon_states_path.clone()); // GET beacon/states/{state_id}/fork - let get_beacon_state_fork = beacon_states_path + let get_beacon_state_fork = states::get_beacon_state_fork(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/finality_checkpoints + let get_beacon_state_finality_checkpoints = + states::get_beacon_state_finality_checkpoints(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/validator_balances?id + 
let get_beacon_state_validator_balances = + states::get_beacon_state_validator_balances(beacon_states_path.clone()); + + // POST beacon/states/{state_id}/validator_balances + let post_beacon_state_validator_balances = + states::post_beacon_state_validator_balances(beacon_states_path.clone()); + + // POST beacon/states/{state_id}/validator_identities + let post_beacon_state_validator_identities = + states::post_beacon_state_validator_identities(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/validators?id,status + let get_beacon_state_validators = + states::get_beacon_state_validators(beacon_states_path.clone()); + + // POST beacon/states/{state_id}/validators + let post_beacon_state_validators = + states::post_beacon_state_validators(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/validators/{validator_id} + let get_beacon_state_validators_id = + states::get_beacon_state_validators_id(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/committees?slot,index,epoch + let get_beacon_state_committees = + states::get_beacon_state_committees(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/sync_committees?epoch + let get_beacon_state_sync_committees = + states::get_beacon_state_sync_committees(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/randao?epoch + let get_beacon_state_randao = states::get_beacon_state_randao(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/pending_deposits + let get_beacon_state_pending_deposits = + states::get_beacon_state_pending_deposits(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/pending_partial_withdrawals + let get_beacon_state_pending_partial_withdrawals = + states::get_beacon_state_pending_partial_withdrawals(beacon_states_path.clone()); + + // GET beacon/states/{state_id}/pending_consolidations + let get_beacon_state_pending_consolidations = + states::get_beacon_state_pending_consolidations(beacon_states_path.clone()); + + 
// GET beacon/headers + // + // Note: this endpoint only returns information about blocks in the canonical chain. Given that + // there's a `canonical` flag on the response, I assume it should also return non-canonical + // things. Returning non-canonical things is hard for us since we don't already have a + // mechanism for arbitrary forwards block iteration, we only support iterating forwards along + // the canonical chain. + let get_beacon_headers = eth_v1 .clone() - .and(warp::path("fork")) + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::query::()) .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) .then( - |state_id: StateId, + |query: api_types::HeadersQuery, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { - let (fork, execution_optimistic, finalized) = - state_id.fork_and_execution_optimistic_and_finalized(&chain)?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data: fork, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + let (root, block, execution_optimistic, finalized) = + match (query.slot, query.parent_root) { + // No query parameters, return the canonical head block. + (None, None) => { + let (cached_head, execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::unhandled_error)?; + ( + cached_head.head_block_root(), + cached_head.snapshot.beacon_block.clone_as_blinded(), + execution_status.is_optimistic_or_invalid(), + false, + ) + } + // Only the parent root parameter, do a forwards-iterator lookup. + (None, Some(parent_root)) => { + let (parent, execution_optimistic, _parent_finalized) = + BlockId::from_root(parent_root).blinded_block(&chain)?; + let (root, _slot) = chain + .forwards_iter_block_roots(parent.slot()) + .map_err(warp_utils::reject::unhandled_error)? + // Ignore any skip-slots immediately following the parent. 
+ .find(|res| { + res.as_ref().is_ok_and(|(root, _)| *root != parent_root) + }) + .transpose() + .map_err(warp_utils::reject::unhandled_error)? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!( + "child of block with root {}", + parent_root + )) + })?; + + BlockId::from_root(root) + .blinded_block(&chain) + // Ignore this `execution_optimistic` since the first value has + // more information about the original request. + .map(|(block, _execution_optimistic, finalized)| { + (root, block, execution_optimistic, finalized) + })? + } + // Slot is supplied, search by slot and optionally filter by + // parent root. + (Some(slot), parent_root_opt) => { + let (root, execution_optimistic, finalized) = + BlockId::from_slot(slot).root(&chain)?; + // Ignore the second `execution_optimistic`, the first one is the + // most relevant since it knows that we queried by slot. + let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; + + // If the parent root was supplied, check that it matches the block + // obtained via a slot lookup. 
+ if let Some(parent_root) = parent_root_opt + && block.parent_root() != parent_root + { + return Err(warp_utils::reject::custom_not_found(format!( + "no canonical block at slot {} with parent root {}", + slot, parent_root + ))); + } + + (root, block, execution_optimistic, finalized) + } + }; + + let data = api_types::BlockHeaderData { + root, + canonical: true, + header: api_types::BlockHeaderAndSignature { + message: block.message().block_header(), + signature: block.signature().clone().into(), + }, + }; + + Ok(api_types::GenericResponse::from(vec![data]) + .add_execution_optimistic_finalized(execution_optimistic, finalized)) }) }, ); - // GET beacon/states/{state_id}/finality_checkpoints - let get_beacon_state_finality_checkpoints = beacon_states_path + // GET beacon/headers/{block_id} + let get_beacon_headers_block_id = eth_v1 .clone() - .and(warp::path("finality_checkpoints")) + .and(warp::path("beacon")) + .and(warp::path("headers")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid block ID".to_string(), + )) + })) .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) .then( - |state_id: StateId, + |block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - Ok(( - api_types::FinalityCheckpointsData { - previous_justified: state.previous_justified_checkpoint(), - current_justified: state.current_justified_checkpoint(), - finalized: state.finalized_checkpoint(), - }, - execution_optimistic, - finalized, - )) - }, - )?; + let (root, execution_optimistic, finalized) = block_id.root(&chain)?; + // Ignore the second `execution_optimistic` since the first one has more + // information about the original request. 
+ let (block, _execution_optimistic, _finalized) = + BlockId::from_root(root).blinded_block(&chain)?; + + let canonical = chain + .block_root_at_slot(block.slot(), WhenSlotSkipped::None) + .map_err(warp_utils::reject::unhandled_error)? + .is_some_and(|canonical| root == canonical); + + let data = api_types::BlockHeaderData { + root, + canonical, + header: api_types::BlockHeaderAndSignature { + message: block.message().block_header(), + signature: block.signature().clone().into(), + }, + }; Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, execution_optimistic: Some(execution_optimistic), finalized: Some(finalized), + data, }) }) }, ); - // GET beacon/states/{state_id}/validator_balances?id - let get_beacon_state_validator_balances = beacon_states_path + /* + * beacon/blocks + */ + let consensus_version_header_filter = + warp::header::header::(CONSENSUS_VERSION_HEADER).boxed(); + + let optional_consensus_version_header_filter = + warp::header::optional::(CONSENSUS_VERSION_HEADER).boxed(); + + // POST beacon/blocks + let post_beacon_blocks = eth_v1 .clone() - .and(warp::path("validator_balances")) + .and(warp::path("beacon")) + .and(warp::path("blocks")) .and(warp::path::end()) - .and(multi_key_query::()) + .and(warp::body::json()) + .and(consensus_version_header_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query_res: Result| { - task_spawner.blocking_json_task(Priority::P1, move || { - let query = query_res?; - crate::validators::get_beacon_state_validator_balances( - state_id, + move |value: serde_json::Value, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let request = PublishBlockRequest::::context_deserialize( + &value, + consensus_version, + ) + .map_err(|e| { + 
warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) + })?; + publish_blocks::publish_block( + None, + ProvenancedBlock::local_from_publish_request(request), chain, - query.id.as_deref(), + &network_tx, + BroadcastValidation::default(), + duplicate_block_status_code, ) + .await }) }, ); - // POST beacon/states/{state_id}/validator_balances - let post_beacon_state_validator_balances = beacon_states_path + let post_beacon_blocks_ssz = eth_v1 .clone() - .and(warp::path("validator_balances")) + .and(warp::path("beacon")) + .and(warp::path("blocks")) .and(warp::path::end()) - .and(warp_utils::json::json_no_body()) - .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorBalancesRequestBody| { - task_spawner.blocking_json_task(Priority::P1, move || { - crate::validators::get_beacon_state_validator_balances( - state_id, + .and(warp::body::bytes()) + .and(consensus_version_header_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .then( + move |block_bytes: Bytes, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block_contents = PublishBlockRequest::::from_ssz_bytes( + &block_bytes, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_block( + None, + ProvenancedBlock::local_from_publish_request(block_contents), chain, - Some(&query.ids), + &network_tx, + BroadcastValidation::default(), + duplicate_block_status_code, ) + .await }) }, ); - // POST beacon/states/{state_id}/validator_identities - let post_beacon_state_validator_identities = beacon_states_path + let post_beacon_blocks_v2 = eth_v2 .clone() - .and(warp::path("validator_identities")) + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::query::()) 
.and(warp::path::end()) - .and(warp_utils::json::json_no_body()) + .and(warp::body::json()) + .and(consensus_version_header_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorIdentitiesRequestBody| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. - let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - crate::validators::get_beacon_state_validator_identities( - state_id, + move |validation_level: api_types::BroadcastValidationQuery, + value: serde_json::Value, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let request = PublishBlockRequest::::context_deserialize( + &value, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) + })?; + + publish_blocks::publish_block( + None, + ProvenancedBlock::local_from_publish_request(request), chain, - Some(&query.ids), + &network_tx, + validation_level.broadcast_validation, + duplicate_block_status_code, ) + .await }) }, ); - // GET beacon/states/{state_id}/validators?id,status - let get_beacon_state_validators = beacon_states_path + let post_beacon_blocks_v2_ssz = eth_v2 .clone() - .and(warp::path("validators")) + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(warp::query::()) .and(warp::path::end()) - .and(multi_key_query::()) + .and(warp::body::bytes()) + .and(consensus_version_header_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - 
chain: Arc>, - query_res: Result| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. - let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let query = query_res?; - crate::validators::get_beacon_state_validators( - state_id, + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block_contents = PublishBlockRequest::::from_ssz_bytes( + &block_bytes, + consensus_version, + ) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_block( + None, + ProvenancedBlock::local_from_publish_request(block_contents), chain, - &query.id, - &query.status, + &network_tx, + validation_level.broadcast_validation, + duplicate_block_status_code, ) + .await }) }, ); - // POST beacon/states/{state_id}/validators - let post_beacon_state_validators = beacon_states_path + /* + * beacon/blinded_blocks + */ + + // POST beacon/blinded_blocks + let post_beacon_blinded_blocks = eth_v1 .clone() - .and(warp::path("validators")) + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) .and(warp::path::end()) .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: ValidatorsRequestBody| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. 
- let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - crate::validators::get_beacon_state_validators( - state_id, + move |block_contents: Arc>, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + publish_blocks::publish_blinded_block( + block_contents, chain, - &query.ids, - &query.statuses, + &network_tx, + BroadcastValidation::default(), + duplicate_block_status_code, ) + .await }) }, ); - // GET beacon/states/{state_id}/validators/{validator_id} - let get_beacon_state_validators_id = beacon_states_path + // POST beacon/blocks + let post_beacon_blinded_blocks_ssz = eth_v1 .clone() - .and(warp::path("validators")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid validator ID".to_string(), - )) - })) + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) .and(warp::path::end()) + .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - validator_id: ValidatorId| { - // Prioritise requests for validators at the head. These should be fast to service - // and could be required by the validator client. 
- let priority = if let StateId(eth2::types::StateId::Head) = state_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let index_opt = match &validator_id { - ValidatorId::PublicKey(pubkey) => pubkey_to_validator_index( - &chain, state, pubkey, - ) - .map_err(|e| { - warp_utils::reject::custom_not_found(format!( - "unable to access pubkey cache: {e:?}", - )) - })?, - ValidatorId::Index(index) => Some(*index as usize), - }; - - Ok(( - index_opt - .and_then(|index| { - let validator = state.validators().get(index)?; - let balance = *state.balances().get(index)?; - let epoch = state.current_epoch(); - let far_future_epoch = chain.spec.far_future_epoch; - - Some(api_types::ValidatorData { - index: index as u64, - balance, - status: api_types::ValidatorStatus::from_validator( - validator, - epoch, - far_future_epoch, - ), - validator: validator.clone(), - }) - }) - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "unknown validator: {}", - validator_id - )) - })?, - execution_optimistic, - finalized, - )) - }, - )?; - - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + move |block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map(Arc::new) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + BroadcastValidation::default(), + duplicate_block_status_code, + ) + .await }) }, ); - // GET 
beacon/states/{state_id}/committees?slot,index,epoch - let get_beacon_state_committees = beacon_states_path + let post_beacon_blinded_blocks_v2 = eth_v2 .clone() - .and(warp::path("committees")) - .and(warp::query::()) + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(consensus_version_header_filter) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) .then( - |state_id: StateId, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::CommitteesQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (data, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - - // Attempt to obtain the committee_cache from the beacon chain - let decision_slot = (epoch.saturating_sub(2u64)) - .end_slot(T::EthSpec::slots_per_epoch()); - // Find the decision block and skip to another method on any kind - // of failure - let shuffling_id = if let Ok(Some(shuffling_decision_block)) = - chain.block_root_at_slot(decision_slot, WhenSlotSkipped::Prev) - { - Some(AttestationShufflingId { - shuffling_epoch: epoch, - shuffling_decision_block, - }) - } else { - None - }; - - // Attempt to read from the chain cache if there exists a - // shuffling_id - let maybe_cached_shuffling = if let Some(shuffling_id) = - shuffling_id.as_ref() - { - chain - .shuffling_cache - .try_write_for(std::time::Duration::from_secs(1)) - .and_then(|mut cache_write| cache_write.get(shuffling_id)) - .and_then(|cache_item| cache_item.wait().ok()) - } else { - None - }; - - let committee_cache = - if let Some(shuffling) = maybe_cached_shuffling { - shuffling - } else { - let possibly_built_cache = - match RelativeEpoch::from_epoch(current_epoch, epoch) { - 
Ok(relative_epoch) - if state.committee_cache_is_initialized( - relative_epoch, - ) => - { - state.committee_cache(relative_epoch).cloned() - } - _ => CommitteeCache::initialized( - state, - epoch, - &chain.spec, - ), - } - .map_err( - |e| match e { - BeaconStateError::EpochOutOfBounds => { - let max_sprp = - T::EthSpec::slots_per_historical_root() - as u64; - let first_subsequent_restore_point_slot = - ((epoch.start_slot( - T::EthSpec::slots_per_epoch(), - ) / max_sprp) - + 1) - * max_sprp; - if epoch < current_epoch { - warp_utils::reject::custom_bad_request( - format!( - "epoch out of bounds, \ - try state at slot {}", - first_subsequent_restore_point_slot, - ), - ) - } else { - warp_utils::reject::custom_bad_request( - "epoch out of bounds, \ - too far in future" - .into(), - ) - } - } - _ => warp_utils::reject::unhandled_error( - BeaconChainError::from(e), - ), - }, - )?; - - // Attempt to write to the beacon cache (only if the cache - // size is not the default value). - if chain.config.shuffling_cache_size - != beacon_chain::shuffling_cache::DEFAULT_CACHE_SIZE - && let Some(shuffling_id) = shuffling_id - && let Some(mut cache_write) = chain - .shuffling_cache - .try_write_for(std::time::Duration::from_secs(1)) - { - cache_write.insert_committee_cache( - shuffling_id, - &possibly_built_cache, - ); - } - - possibly_built_cache - }; - - // Use either the supplied slot or all slots in the epoch. - let slots = - query.slot.map(|slot| vec![slot]).unwrap_or_else(|| { - epoch.slot_iter(T::EthSpec::slots_per_epoch()).collect() - }); - - // Use either the supplied committee index or all available indices. - let indices = - query.index.map(|index| vec![index]).unwrap_or_else(|| { - (0..committee_cache.committees_per_slot()).collect() - }); - - let mut response = Vec::with_capacity(slots.len() * indices.len()); - - for slot in slots { - // It is not acceptable to query with a slot that is not within the - // specified epoch. 
- if slot.epoch(T::EthSpec::slots_per_epoch()) != epoch { - return Err(warp_utils::reject::custom_bad_request( - format!("{} is not in epoch {}", slot, epoch), - )); - } - - for &index in &indices { - let committee = committee_cache - .get_beacon_committee(slot, index) - .ok_or_else(|| { - warp_utils::reject::custom_bad_request(format!( - "committee index {} does not exist in epoch {}", - index, epoch - )) - })?; - - response.push(api_types::CommitteeData { - index, - slot, - validators: committee - .committee - .iter() - .map(|i| *i as u64) - .collect(), - }); - } - } + move |validation_level: api_types::BroadcastValidationQuery, + blinded_block_json: serde_json::Value, + consensus_version: ForkName, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let blinded_block = + SignedBlindedBeaconBlock::::context_deserialize( + &blinded_block_json, + consensus_version, + ) + .map(Arc::new) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) + })?; + publish_blocks::publish_blinded_block( + blinded_block, + chain, + &network_tx, + validation_level.broadcast_validation, + duplicate_block_status_code, + ) + .await + }) + }, + ); - Ok((response, execution_optimistic, finalized)) - }, - )?; - Ok(api_types::ExecutionOptimisticFinalizedResponse { - data, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }) + let post_beacon_blinded_blocks_v2_ssz = eth_v2 + .clone() + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::body::bytes()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(network_tx_filter.clone()) + .then( + move |validation_level: api_types::BroadcastValidationQuery, + block_bytes: Bytes, + task_spawner: TaskSpawner, + chain: Arc>, + network_tx: UnboundedSender>| { + 
task_spawner.spawn_async_with_rejection(Priority::P0, async move { + let block = SignedBlindedBeaconBlock::::from_ssz_bytes( + &block_bytes, + &chain.spec, + ) + .map(Arc::new) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) + })?; + publish_blocks::publish_blinded_block( + block, + chain, + &network_tx, + validation_level.broadcast_validation, + duplicate_block_status_code, + ) + .await }) }, ); - // GET beacon/states/{state_id}/sync_committees?epoch - let get_beacon_state_sync_committees = beacon_states_path + let block_id_or_err = warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid block ID".to_string(), + )) + }); + + let beacon_blocks_path_v1 = eth_v1 + .clone() + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + let beacon_blocks_path_any = any_version + .clone() + .and(warp::path("beacon")) + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + // GET beacon/blocks/{block_id} + let get_beacon_block = beacon_blocks_path_any .clone() - .and(warp::path("sync_committees")) - .and(warp::query::()) .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - |state_id: StateId, + |endpoint_version: EndpointVersion, + block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>, - query: api_types::SyncCommitteesQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (sync_committee, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let current_epoch = state.current_epoch(); - let epoch = query.epoch.unwrap_or(current_epoch); - Ok(( - state - .get_built_sync_committee(epoch, &chain.spec) - .cloned() - .map_err(|e| match e { - BeaconStateError::SyncCommitteeNotKnown { .. 
} => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} has no \ - sync committee for epoch {}", - current_epoch, epoch - )) - } - BeaconStateError::IncorrectStateVariant => { - warp_utils::reject::custom_bad_request(format!( - "state at epoch {} is not activated for Altair", - current_epoch, - )) - } - e => warp_utils::reject::beacon_state_error(e), - })?, - execution_optimistic, - finalized, - )) - }, - )?; - - let validators = chain - .validator_indices(sync_committee.pubkeys.iter()) - .map_err(warp_utils::reject::unhandled_error)?; - - let validator_aggregates = validators - .chunks_exact(T::EthSpec::sync_subcommittee_size()) - .map(|indices| api_types::SyncSubcommittee { - indices: indices.to_vec(), - }) - .collect(); + accept_header: Option| { + task_spawner.spawn_async_with_rejection(Priority::P1, async move { + let (block, execution_optimistic, finalized) = + block_id.full_block(&chain).await?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; - let response = api_types::SyncCommitteeByValidatorIndices { - validators, - validator_aggregates, + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), }; - Ok(api_types::GenericResponse::from(response) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(block.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => execution_optimistic_finalized_beacon_response( + require_version, + execution_optimistic, + finalized, + block, + ) + .map(|res| warp::reply::json(&res).into_response()), + } + .map(|resp| add_consensus_version_header(resp, 
fork_name)) }) }, ); - // GET beacon/states/{state_id}/randao?epoch - let get_beacon_state_randao = beacon_states_path + // GET beacon/blocks/{block_id}/root + let get_beacon_block_root = beacon_blocks_path_v1 .clone() - .and(warp::path("randao")) - .and(warp::query::()) + .and(warp::path("root")) .and(warp::path::end()) .then( - |state_id: StateId, + |block_id: BlockId, task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::RandaoQuery| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (randao, execution_optimistic, finalized) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let epoch = query.epoch.unwrap_or_else(|| state.current_epoch()); - let randao = *state.get_randao_mix(epoch).map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "epoch out of range: {e:?}" - )) - })?; - Ok((randao, execution_optimistic, finalized)) - }, - )?; - + chain: Arc>| { + // Prioritise requests for the head block root, as it is used by some VCs (including + // the Lighthouse VC) to create sync committee messages. 
+ let priority = if let BlockId(eth2::types::BlockId::Head) = block_id { + Priority::P0 + } else { + Priority::P1 + }; + task_spawner.blocking_json_task(priority, move || { + let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; Ok( - api_types::GenericResponse::from(api_types::RandaoMix { randao }) + api_types::GenericResponse::from(api_types::RootData::from(block_root)) .add_execution_optimistic_finalized(execution_optimistic, finalized), ) }) }, ); - // GET beacon/states/{state_id}/pending_deposits - let get_beacon_state_pending_deposits = beacon_states_path + // GET beacon/blocks/{block_id}/attestations + let get_beacon_block_attestations = beacon_blocks_path_any .clone() - .and(warp::path("pending_deposits")) + .and(warp::path("attestations")) .and(warp::path::end()) .then( - |state_id: StateId, + |endpoint_version: EndpointVersion, + block_id: BlockId, task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(deposits) = state.pending_deposits() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending deposits not found".to_string(), - )); - }; + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let atts = block + .message() + .body() + .attestations() + .map(|att| att.clone_as_attestation()) + .collect::>(); - Ok(( - deposits.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), - )) - }, - )?; + let require_version = match endpoint_version { + V1 => ResponseIncludesVersion::No, + V2 => ResponseIncludesVersion::Yes(fork_name), + _ => return Err(unsupported_version_rejection(endpoint_version)), + }; - execution_optimistic_finalized_beacon_response( - 
ResponseIncludesVersion::Yes(fork_name), + let res = execution_optimistic_finalized_beacon_response( + require_version, execution_optimistic, finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) - .map(|resp| add_consensus_version_header(resp, fork_name)) + &atts, + )?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) }) }, ); - // GET beacon/states/{state_id}/pending_partial_withdrawals - let get_beacon_state_pending_partial_withdrawals = beacon_states_path + // GET beacon/blinded_blocks/{block_id} + let get_beacon_blinded_block = eth_v1 .clone() - .and(warp::path("pending_partial_withdrawals")) + .and(warp::path("beacon")) + .and(warp::path("blinded_blocks")) + .and(block_id_or_err) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - |state_id: StateId, + |block_id: BlockId, task_spawner: TaskSpawner, - chain: Arc>| { + chain: Arc>, + accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(withdrawals) = state.pending_partial_withdrawals() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending withdrawals not found".to_string(), - )); - }; + let (block, execution_optimistic, finalized) = + block_id.blinded_block(&chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; - Ok(( - withdrawals.clone(), - execution_optimistic, - finalized, - state.fork_name_unchecked(), + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(block.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create 
response: {}", + e )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. + execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + block, + ) + .map(|res| warp::reply::json(&res).into_response()) + } + } .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - // GET beacon/states/{state_id}/pending_consolidations - let get_beacon_state_pending_consolidations = beacon_states_path + /* + * beacon/blob_sidecars + */ + + // GET beacon/blob_sidecars/{block_id} + let get_blob_sidecars = eth_v1 .clone() - .and(warp::path("pending_consolidations")) + .and(warp::path("beacon")) + .and(warp::path("blob_sidecars")) + .and(block_id_or_err) .and(warp::path::end()) + .and(multi_key_query::()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) .then( - |state_id: StateId, + |block_id: BlockId, + indices_res: Result, task_spawner: TaskSpawner, - chain: Arc>| { + chain: Arc>, + accept_header: Option| { task_spawner.blocking_response_task(Priority::P1, move || { - let (data, execution_optimistic, finalized, fork_name) = state_id - .map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let Ok(consolidations) = state.pending_consolidations() else { - return Err(warp_utils::reject::custom_bad_request( - "Pending consolidations not found".to_string(), - )); - }; + let indices = indices_res?; + let (block, blob_sidecar_list_filtered, execution_optimistic, finalized) = + block_id.get_blinded_block_and_blob_list_filtered(indices, &chain)?; + let fork_name = block + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; - Ok(( - consolidations.clone(), - 
execution_optimistic, - finalized, - state.fork_name_unchecked(), + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(blob_sidecar_list_filtered.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e )) - }, - )?; - - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - data, - ) - .map(|res| warp::reply::json(&res).into_response()) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + &blob_sidecar_list_filtered, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - // GET beacon/headers - // - // Note: this endpoint only returns information about blocks in the canonical chain. Given that - // there's a `canonical` flag on the response, I assume it should also return non-canonical - // things. Returning non-canonical things is hard for us since we don't already have a - // mechanism for arbitrary forwards block iteration, we only support iterating forwards along - // the canonical chain. 
- let get_beacon_headers = eth_v1 + // GET beacon/blobs/{block_id} + let get_blobs = eth_v1 + .clone() .and(warp::path("beacon")) - .and(warp::path("headers")) - .and(warp::query::()) + .and(warp::path("blobs")) + .and(block_id_or_err) .and(warp::path::end()) + .and(multi_key_query::()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) .then( - |query: api_types::HeadersQuery, + |block_id: BlockId, + version_hashes_res: Result, task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (root, block, execution_optimistic, finalized) = - match (query.slot, query.parent_root) { - // No query parameters, return the canonical head block. - (None, None) => { - let (cached_head, execution_status) = chain - .canonical_head - .head_and_execution_status() - .map_err(warp_utils::reject::unhandled_error)?; - ( - cached_head.head_block_root(), - cached_head.snapshot.beacon_block.clone_as_blinded(), - execution_status.is_optimistic_or_invalid(), - false, - ) - } - // Only the parent root parameter, do a forwards-iterator lookup. - (None, Some(parent_root)) => { - let (parent, execution_optimistic, _parent_finalized) = - BlockId::from_root(parent_root).blinded_block(&chain)?; - let (root, _slot) = chain - .forwards_iter_block_roots(parent.slot()) - .map_err(warp_utils::reject::unhandled_error)? - // Ignore any skip-slots immediately following the parent. - .find(|res| { - res.as_ref().is_ok_and(|(root, _)| *root != parent_root) - }) - .transpose() - .map_err(warp_utils::reject::unhandled_error)? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "child of block with root {}", - parent_root - )) - })?; - - BlockId::from_root(root) - .blinded_block(&chain) - // Ignore this `execution_optimistic` since the first value has - // more information about the original request. 
- .map(|(block, _execution_optimistic, finalized)| { - (root, block, execution_optimistic, finalized) - })? - } - // Slot is supplied, search by slot and optionally filter by - // parent root. - (Some(slot), parent_root_opt) => { - let (root, execution_optimistic, finalized) = - BlockId::from_slot(slot).root(&chain)?; - // Ignore the second `execution_optimistic`, the first one is the - // most relevant since it knows that we queried by slot. - let (block, _execution_optimistic, _finalized) = - BlockId::from_root(root).blinded_block(&chain)?; - - // If the parent root was supplied, check that it matches the block - // obtained via a slot lookup. - if let Some(parent_root) = parent_root_opt - && block.parent_root() != parent_root - { - return Err(warp_utils::reject::custom_not_found(format!( - "no canonical block at slot {} with parent root {}", - slot, parent_root - ))); - } - - (root, block, execution_optimistic, finalized) - } - }; - - let data = api_types::BlockHeaderData { - root, - canonical: true, - header: api_types::BlockHeaderAndSignature { - message: block.message().block_header(), - signature: block.signature().clone().into(), - }, - }; + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let versioned_hashes = version_hashes_res?; + let response = + block_id.get_blobs_by_versioned_hashes(versioned_hashes, &chain)?; - Ok(api_types::GenericResponse::from(vec![data]) - .add_execution_optimistic_finalized(execution_optimistic, finalized)) + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(response.data.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::No, + response.metadata.execution_optimistic.unwrap_or(false), + 
response.metadata.finalized.unwrap_or(false), + response.data, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } }) }, ); - // GET beacon/headers/{block_id} - let get_beacon_headers_block_id = eth_v1 + /* + * beacon/pool + */ + + let beacon_pool_path = eth_v1 + .clone() .and(warp::path("beacon")) - .and(warp::path("headers")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid block ID".to_string(), - )) - })) - .and(warp::path::end()) + .and(warp::path("pool")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) - .then( - |block_id: BlockId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (root, execution_optimistic, finalized) = block_id.root(&chain)?; - // Ignore the second `execution_optimistic` since the first one has more - // information about the original request. - let (block, _execution_optimistic, _finalized) = - BlockId::from_root(root).blinded_block(&chain)?; + .boxed(); - let canonical = chain - .block_root_at_slot(block.slot(), WhenSlotSkipped::None) - .map_err(warp_utils::reject::unhandled_error)? 
- .is_some_and(|canonical| root == canonical); + let beacon_pool_path_v2 = eth_v2 + .clone() + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .boxed(); - let data = api_types::BlockHeaderData { - root, - canonical, - header: api_types::BlockHeaderAndSignature { - message: block.message().block_header(), - signature: block.signature().clone().into(), - }, - }; + let beacon_pool_path_any = any_version + .clone() + .and(warp::path("beacon")) + .and(warp::path("pool")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .boxed(); - Ok(api_types::ExecutionOptimisticFinalizedResponse { - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - data, + let post_beacon_pool_attestations_v2 = post_beacon_pool_attestations_v2( + &network_tx_filter, + optional_consensus_version_header_filter, + &beacon_pool_path_v2, + ); + + // GET beacon/pool/attestations?committee_index,slot + let get_beacon_pool_attestations = get_beacon_pool_attestations(&beacon_pool_path_any); + + // POST beacon/pool/attester_slashings + let post_beacon_pool_attester_slashings = + post_beacon_pool_attester_slashings(&network_tx_filter, &beacon_pool_path_any); + + // GET beacon/pool/attester_slashings + let get_beacon_pool_attester_slashings = + get_beacon_pool_attester_slashings(&beacon_pool_path_any); + + // POST beacon/pool/proposer_slashings + let post_beacon_pool_proposer_slashings = + post_beacon_pool_proposer_slashings(&network_tx_filter, &beacon_pool_path); + + // GET beacon/pool/proposer_slashings + let get_beacon_pool_proposer_slashings = get_beacon_pool_proposer_slashings(&beacon_pool_path); + + // POST beacon/pool/voluntary_exits + let post_beacon_pool_voluntary_exits = + post_beacon_pool_voluntary_exits(&network_tx_filter, &beacon_pool_path); + + // GET beacon/pool/voluntary_exits + let get_beacon_pool_voluntary_exits = get_beacon_pool_voluntary_exits(&beacon_pool_path); + + // 
POST beacon/pool/sync_committees + let post_beacon_pool_sync_committees = + post_beacon_pool_sync_committees(&network_tx_filter, &beacon_pool_path); + + // GET beacon/pool/bls_to_execution_changes + let get_beacon_pool_bls_to_execution_changes = + get_beacon_pool_bls_to_execution_changes(&beacon_pool_path); + + // POST beacon/pool/bls_to_execution_changes + let post_beacon_pool_bls_to_execution_changes = + post_beacon_pool_bls_to_execution_changes(&network_tx_filter, &beacon_pool_path); + + let beacon_rewards_path = eth_v1 + .clone() + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); + + // GET beacon/rewards/blocks/{block_id} + let get_beacon_rewards_blocks = beacon_rewards_path + .clone() + .and(warp::path("blocks")) + .and(block_id_or_err) + .and(warp::path::end()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + block_id: BlockId| { + task_spawner.blocking_json_task(Priority::P1, move || { + let (rewards, execution_optimistic, finalized) = + standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; + Ok(api_types::GenericResponse::from(rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) }) }) }, ); /* - * beacon/blocks + * builder/states */ - let consensus_version_header_filter = - warp::header::header::(CONSENSUS_VERSION_HEADER); - let optional_consensus_version_header_filter = - warp::header::optional::(CONSENSUS_VERSION_HEADER); + let builder_states_path = eth_v1 + .clone() + .and(warp::path("builder")) + .and(warp::path("states")) + .and(chain_filter.clone()); - // POST beacon/blocks - let post_beacon_blocks = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(warp::path::end()) - .and(warp::body::json()) - .and(consensus_version_header_filter) + // GET builder/states/{state_id}/expected_withdrawals + let get_expected_withdrawals = builder_states_path + .clone() .and(task_spawner_filter.clone()) - 
.and(chain_filter.clone()) - .and(network_tx_filter.clone()) + .and(warp::path::param::()) + .and(warp::path("expected_withdrawals")) + .and(warp::query::()) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - move |value: serde_json::Value, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let request = PublishBlockRequest::::context_deserialize( - &value, - consensus_version, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) - })?; - publish_blocks::publish_block( - None, - ProvenancedBlock::local_from_publish_request(request), - chain, - &network_tx, - BroadcastValidation::default(), - duplicate_block_status_code, - ) - .await + |chain: Arc>, + task_spawner: TaskSpawner, + state_id: StateId, + query: api_types::ExpectedWithdrawalsQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let (state, execution_optimistic, finalized) = state_id.state(&chain)?; + let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1); + let withdrawals = + get_next_withdrawals::(&chain, state, state_id, proposal_slot)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(withdrawals.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json( + &api_types::ExecutionOptimisticFinalizedResponse { + data: withdrawals, + execution_optimistic: Some(execution_optimistic), + finalized: Some(finalized), + }, + ) + .into_response()), + } }) }, ); - let post_beacon_blocks_ssz = eth_v1 + /* + * beacon/light_client + */ + + let beacon_light_client_path = eth_v1 + .clone() .and(warp::path("beacon")) - .and(warp::path("blocks")) - 
.and(warp::path::end()) - .and(warp::body::bytes()) - .and(consensus_version_header_filter) + .and(warp::path("light_client")) + .and(light_client_server_filter) + .and(chain_filter.clone()); + + // GET beacon/light_client/bootstrap/{block_root} + let get_beacon_light_client_bootstrap = beacon_light_client_path + .clone() .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) + .and(warp::path("bootstrap")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid block root value".to_string(), + )) + })) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - move |block_bytes: Bytes, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block_contents = PublishBlockRequest::::from_ssz_bytes( - &block_bytes, - consensus_version, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; - publish_blocks::publish_block( - None, - ProvenancedBlock::local_from_publish_request(block_contents), - chain, - &network_tx, - BroadcastValidation::default(), - duplicate_block_status_code, - ) - .await + |light_client_server_enabled: Result<(), Rejection>, + chain: Arc>, + task_spawner: TaskSpawner, + block_root: Hash256, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + light_client_server_enabled?; + get_light_client_bootstrap::(chain, &block_root, accept_header) }) }, ); - let post_beacon_blocks_v2 = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp::body::json()) - .and(consensus_version_header_filter) + // GET beacon/light_client/optimistic_update + let get_beacon_light_client_optimistic_update = beacon_light_client_path + .clone() .and(task_spawner_filter.clone()) - 
.and(chain_filter.clone()) - .and(network_tx_filter.clone()) + .and(warp::path("optimistic_update")) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) .then( - move |validation_level: api_types::BroadcastValidationQuery, - value: serde_json::Value, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let request = PublishBlockRequest::::context_deserialize( - &value, - consensus_version, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) - })?; + |light_client_server_enabled: Result<(), Rejection>, + chain: Arc>, + task_spawner: TaskSpawner, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + light_client_server_enabled?; + let update = chain + .light_client_server_cache + .get_latest_optimistic_update() + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "No LightClientOptimisticUpdate is available".to_string(), + ) + })?; - publish_blocks::publish_block( - None, - ProvenancedBlock::local_from_publish_request(request), - chain, - &network_tx, - validation_level.broadcast_validation, - duplicate_block_status_code, - ) - .await + let fork_name = chain + .spec + .fork_name_at_slot::(update.get_slot()); + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(update.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => Ok(warp::reply::json(&beacon_response( + ResponseIncludesVersion::Yes(fork_name), + update, + )) + .into_response()), + } + .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - let post_beacon_blocks_v2_ssz = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(warp::query::()) - 
.and(warp::path::end()) - .and(warp::body::bytes()) - .and(consensus_version_header_filter) + // GET beacon/light_client/finality_update + let get_beacon_light_client_finality_update = beacon_light_client_path + .clone() .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .then( - move |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block_contents = PublishBlockRequest::::from_ssz_bytes( - &block_bytes, - consensus_version, - ) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; - publish_blocks::publish_block( - None, - ProvenancedBlock::local_from_publish_request(block_contents), - chain, - &network_tx, - validation_level.broadcast_validation, - duplicate_block_status_code, - ) - .await - }) - }, - ); - - /* - * beacon/blinded_blocks - */ - - // POST beacon/blinded_blocks - let post_beacon_blinded_blocks = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) + .and(warp::path("finality_update")) .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) + .and(warp::header::optional::("accept")) .then( - move |block_contents: Arc>, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - publish_blocks::publish_blinded_block( - block_contents, - chain, - &network_tx, - BroadcastValidation::default(), - duplicate_block_status_code, - ) - .await - }) - }, - ); - - // POST beacon/blocks - let post_beacon_blinded_blocks_ssz = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::path::end()) - .and(warp::body::bytes()) - 
.and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .then( - move |block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = SignedBlindedBeaconBlock::::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map(Arc::new) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; - publish_blocks::publish_blinded_block( - block, - chain, - &network_tx, - BroadcastValidation::default(), - duplicate_block_status_code, - ) - .await - }) - }, - ); - - let post_beacon_blinded_blocks_v2 = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(consensus_version_header_filter) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .then( - move |validation_level: api_types::BroadcastValidationQuery, - blinded_block_json: serde_json::Value, - consensus_version: ForkName, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let blinded_block = - SignedBlindedBeaconBlock::::context_deserialize( - &blinded_block_json, - consensus_version, - ) - .map(Arc::new) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid JSON: {e:?}")) - })?; - publish_blocks::publish_blinded_block( - blinded_block, - chain, - &network_tx, - validation_level.broadcast_validation, - duplicate_block_status_code, - ) - .await - }) - }, - ); - - let post_beacon_blinded_blocks_v2_ssz = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(warp::query::()) - .and(warp::path::end()) - .and(warp::body::bytes()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(network_tx_filter.clone()) - .then( 
- move |validation_level: api_types::BroadcastValidationQuery, - block_bytes: Bytes, - task_spawner: TaskSpawner, - chain: Arc>, - network_tx: UnboundedSender>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - let block = SignedBlindedBeaconBlock::::from_ssz_bytes( - &block_bytes, - &chain.spec, - ) - .map(Arc::new) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!("invalid SSZ: {e:?}")) - })?; - publish_blocks::publish_blinded_block( - block, - chain, - &network_tx, - validation_level.broadcast_validation, - duplicate_block_status_code, - ) - .await - }) - }, - ); - - let block_id_or_err = warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid block ID".to_string(), - )) - }); - - let beacon_blocks_path_v1 = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(block_id_or_err) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - let beacon_blocks_path_any = any_version - .and(warp::path("beacon")) - .and(warp::path("blocks")) - .and(block_id_or_err) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - // GET beacon/blocks/{block_id} - let get_beacon_block = beacon_blocks_path_any - .clone() - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |endpoint_version: EndpointVersion, - block_id: BlockId, - task_spawner: TaskSpawner, + |light_client_server_enabled: Result<(), Rejection>, chain: Arc>, + task_spawner: TaskSpawner, accept_header: Option| { - task_spawner.spawn_async_with_rejection(Priority::P1, async move { - let (block, execution_optimistic, finalized) = - block_id.full_block(&chain).await?; - let fork_name = block - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; + 
task_spawner.blocking_response_task(Priority::P1, move || { + light_client_server_enabled?; + let update = chain + .light_client_server_cache + .get_latest_finality_update() + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "No LightClientFinalityUpdate is available".to_string(), + ) + })?; + let fork_name = chain + .spec + .fork_name_at_slot::(update.signature_slot()); match accept_header { Some(api_types::Accept::Ssz) => Response::builder() .status(200) - .body(block.as_ssz_bytes().into()) + .body(update.as_ssz_bytes().into()) .map(|res: Response| add_ssz_content_type_header(res)) .map_err(|e| { warp_utils::reject::custom_server_error(format!( @@ -1774,2395 +1668,882 @@ pub fn serve( e )) }), - _ => execution_optimistic_finalized_beacon_response( - require_version, - execution_optimistic, - finalized, - block, - ) - .map(|res| warp::reply::json(&res).into_response()), + _ => Ok(warp::reply::json(&beacon_response( + ResponseIncludesVersion::Yes(fork_name), + update, + )) + .into_response()), } .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET beacon/blocks/{block_id}/root - let get_beacon_block_root = beacon_blocks_path_v1 - .clone() - .and(warp::path("root")) - .and(warp::path::end()) - .then( - |block_id: BlockId, - task_spawner: TaskSpawner, - chain: Arc>| { - // Prioritise requests for the head block root, as it is used by some VCs (including - // the Lighthouse VC) to create sync committee messages. 
- let priority = if let BlockId(eth2::types::BlockId::Head) = block_id { - Priority::P0 - } else { - Priority::P1 - }; - task_spawner.blocking_json_task(priority, move || { - let (block_root, execution_optimistic, finalized) = block_id.root(&chain)?; - Ok( - api_types::GenericResponse::from(api_types::RootData::from(block_root)) - .add_execution_optimistic_finalized(execution_optimistic, finalized), - ) - }) - }, - ); - - // GET beacon/blocks/{block_id}/attestations - let get_beacon_block_attestations = beacon_blocks_path_any - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .then( - |endpoint_version: EndpointVersion, - block_id: BlockId, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (block, execution_optimistic, finalized) = - block_id.blinded_block(&chain)?; - let fork_name = block - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let atts = block - .message() - .body() - .attestations() - .map(|att| att.clone_as_attestation()) - .collect::>(); - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; - - let res = execution_optimistic_finalized_beacon_response( - require_version, - execution_optimistic, - finalized, - &atts, - )?; - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }) - }, - ); - - // GET beacon/blinded_blocks/{block_id} - let get_beacon_blinded_block = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blinded_blocks")) - .and(block_id_or_err) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |block_id: BlockId, - task_spawner: TaskSpawner, - chain: Arc>, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { 
- let (block, execution_optimistic, finalized) = - block_id.blinded_block(&chain)?; - let fork_name = block - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(block.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => { - // Post as a V2 endpoint so we return the fork version. - execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - block, - ) - .map(|res| warp::reply::json(&res).into_response()) - } - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - /* - * beacon/blob_sidecars - */ - - // GET beacon/blob_sidecars/{block_id} - let get_blob_sidecars = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blob_sidecars")) - .and(block_id_or_err) - .and(warp::path::end()) - .and(multi_key_query::()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp::header::optional::("accept")) - .then( - |block_id: BlockId, - indices_res: Result, - task_spawner: TaskSpawner, - chain: Arc>, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let indices = indices_res?; - let (block, blob_sidecar_list_filtered, execution_optimistic, finalized) = - block_id.get_blinded_block_and_blob_list_filtered(indices, &chain)?; - let fork_name = block - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(blob_sidecar_list_filtered.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => { - // Post 
as a V2 endpoint so we return the fork version. - let res = execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - &blob_sidecar_list_filtered, - )?; - Ok(warp::reply::json(&res).into_response()) - } - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET beacon/blobs/{block_id} - let get_blobs = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("blobs")) - .and(block_id_or_err) - .and(warp::path::end()) - .and(multi_key_query::()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp::header::optional::("accept")) - .then( - |block_id: BlockId, - version_hashes_res: Result, - task_spawner: TaskSpawner, - chain: Arc>, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let versioned_hashes = version_hashes_res?; - let response = - block_id.get_blobs_by_versioned_hashes(versioned_hashes, &chain)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(response.data.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => { - let res = execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::No, - response.metadata.execution_optimistic.unwrap_or(false), - response.metadata.finalized.unwrap_or(false), - response.data, - )?; - Ok(warp::reply::json(&res).into_response()) - } - } - }) - }, - ); - - /* - * beacon/pool - */ - - let beacon_pool_path = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("pool")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - let beacon_pool_path_v2 = eth_v2 - .and(warp::path("beacon")) - .and(warp::path("pool")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - let beacon_pool_path_any = any_version - 
.and(warp::path("beacon")) - .and(warp::path("pool")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - let post_beacon_pool_attestations_v2 = beacon_pool_path_v2 - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .and(warp_utils::json::json::>()) - .and(optional_consensus_version_header_filter) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - attestations: Vec, - _fork_name: Option, - network_tx: UnboundedSender>| async move { - let result = crate::publish_attestations::publish_attestations( - task_spawner, - chain, - attestations, - network_tx, - true, - ) - .await - .map(|()| warp::reply::json(&())); - convert_rejection(result).await - }, - ); - - // GET beacon/pool/attestations?committee_index,slot - let get_beacon_pool_attestations = beacon_pool_path_any - .clone() - .and(warp::path("attestations")) - .and(warp::path::end()) - .and(warp::query::()) - .then( - |endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>, - query: api_types::AttestationPoolQuery| { - task_spawner.blocking_response_task(Priority::P1, move || { - let query_filter = |data: &AttestationData, committee_indices: HashSet| { - query.slot.is_none_or(|slot| slot == data.slot) - && query - .committee_index - .is_none_or(|index| committee_indices.contains(&index)) - }; - - let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); - attestations.extend( - chain - .naive_aggregation_pool - .read() - .iter() - .filter(|&att| { - query_filter(att.data(), att.get_committee_indices_map()) - }) - .cloned(), - ); - // Use the current slot to find the fork version, and convert all messages to the - // current fork's format. This is to ensure consistent message types matching - // `Eth-Consensus-Version`. 
- let current_slot = - chain - .slot_clock - .now() - .ok_or(warp_utils::reject::custom_server_error( - "unable to read slot clock".to_string(), - ))?; - let fork_name = chain.spec.fork_name_at_slot::(current_slot); - let attestations = attestations - .into_iter() - .filter(|att| { - (fork_name.electra_enabled() && matches!(att, Attestation::Electra(_))) - || (!fork_name.electra_enabled() - && matches!(att, Attestation::Base(_))) - }) - .collect::>(); - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; - - let res = beacon_response(require_version, &attestations); - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }) - }, - ); - - // POST beacon/pool/attester_slashings - let post_beacon_pool_attester_slashings = beacon_pool_path_any - .clone() - .and(warp::path("attester_slashings")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - // V1 and V2 are identical except V2 has a consensus version header in the request. - // We only require this header for SSZ deserialization, which isn't supported for - // this endpoint presently. - |_endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>, - slashing: AttesterSlashing, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_attester_slashing_for_gossip(slashing.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. 
- chain - .validator_monitor - .read() - .register_api_attester_slashing(slashing.to_ref()); - - if let ObservationOutcome::New(slashing) = outcome { - publish_pubsub_message( - &network_tx, - PubsubMessage::AttesterSlashing(Box::new( - slashing.clone().into_inner(), - )), - )?; - - chain.import_attester_slashing(slashing); - } - - Ok(()) - }) - }, - ); - - // GET beacon/pool/attester_slashings - let get_beacon_pool_attester_slashings = - beacon_pool_path_any - .clone() - .and(warp::path("attester_slashings")) - .and(warp::path::end()) - .then( - |endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || { - let slashings = chain.op_pool.get_all_attester_slashings(); - - // Use the current slot to find the fork version, and convert all messages to the - // current fork's format. This is to ensure consistent message types matching - // `Eth-Consensus-Version`. - let current_slot = chain.slot_clock.now().ok_or( - warp_utils::reject::custom_server_error( - "unable to read slot clock".to_string(), - ), - )?; - let fork_name = chain.spec.fork_name_at_slot::(current_slot); - let slashings = slashings - .into_iter() - .filter(|slashing| { - (fork_name.electra_enabled() - && matches!(slashing, AttesterSlashing::Electra(_))) - || (!fork_name.electra_enabled() - && matches!(slashing, AttesterSlashing::Base(_))) - }) - .collect::>(); - - let require_version = match endpoint_version { - V1 => ResponseIncludesVersion::No, - V2 => ResponseIncludesVersion::Yes(fork_name), - _ => return Err(unsupported_version_rejection(endpoint_version)), - }; - - let res = beacon_response(require_version, &slashings); - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }) - }, - ); - - // POST beacon/pool/proposer_slashings - let post_beacon_pool_proposer_slashings = beacon_pool_path - .clone() - .and(warp::path("proposer_slashings")) - .and(warp::path::end()) - 
.and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - slashing: ProposerSlashing, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_proposer_slashing_for_gossip(slashing.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. - chain - .validator_monitor - .read() - .register_api_proposer_slashing(&slashing); - - if let ObservationOutcome::New(slashing) = outcome { - publish_pubsub_message( - &network_tx, - PubsubMessage::ProposerSlashing(Box::new( - slashing.clone().into_inner(), - )), - )?; - - chain.import_proposer_slashing(slashing); - } - - Ok(()) - }) - }, - ); - - // GET beacon/pool/proposer_slashings - let get_beacon_pool_proposer_slashings = beacon_pool_path - .clone() - .and(warp::path("proposer_slashings")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestations = chain.op_pool.get_all_proposer_slashings(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }, - ); - - // POST beacon/pool/voluntary_exits - let post_beacon_pool_voluntary_exits = beacon_pool_path - .clone() - .and(warp::path("voluntary_exits")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - exit: SignedVoluntaryExit, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let outcome = chain - .verify_voluntary_exit_for_gossip(exit.clone()) - .map_err(|e| { - warp_utils::reject::object_invalid(format!( - "gossip verification failed: {:?}", - e - )) - })?; - - // Notify the validator monitor. 
- chain - .validator_monitor - .read() - .register_api_voluntary_exit(&exit.message); - - if let ObservationOutcome::New(exit) = outcome { - publish_pubsub_message( - &network_tx, - PubsubMessage::VoluntaryExit(Box::new(exit.clone().into_inner())), - )?; - - chain.import_voluntary_exit(exit); - } - - Ok(()) - }) - }, - ); - - // GET beacon/pool/voluntary_exits - let get_beacon_pool_voluntary_exits = beacon_pool_path - .clone() - .and(warp::path("voluntary_exits")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestations = chain.op_pool.get_all_voluntary_exits(); - Ok(api_types::GenericResponse::from(attestations)) - }) - }, - ); - - // POST beacon/pool/sync_committees - let post_beacon_pool_sync_committees = beacon_pool_path - .clone() - .and(warp::path("sync_committees")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - signatures: Vec, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - sync_committees::process_sync_committee_signatures( - signatures, network_tx, &chain, - )?; - Ok(api_types::GenericResponse::from(())) - }) - }, - ); - - // GET beacon/pool/bls_to_execution_changes - let get_beacon_pool_bls_to_execution_changes = beacon_pool_path - .clone() - .and(warp::path("bls_to_execution_changes")) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let address_changes = chain.op_pool.get_all_bls_to_execution_changes(); - Ok(api_types::GenericResponse::from(address_changes)) - }) - }, - ); - - // POST beacon/pool/bls_to_execution_changes - let post_beacon_pool_bls_to_execution_changes = beacon_pool_path - .clone() - .and(warp::path("bls_to_execution_changes")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - 
.and(network_tx_filter.clone()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - address_changes: Vec, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let mut failures = vec![]; - - for (index, address_change) in address_changes.into_iter().enumerate() { - let validator_index = address_change.message.validator_index; - - match chain.verify_bls_to_execution_change_for_http_api(address_change) { - Ok(ObservationOutcome::New(verified_address_change)) => { - let validator_index = - verified_address_change.as_inner().message.validator_index; - let address = verified_address_change - .as_inner() - .message - .to_execution_address; - - // New to P2P *and* op pool, gossip immediately if post-Capella. - let received_pre_capella = - if chain.current_slot_is_post_capella().unwrap_or(false) { - ReceivedPreCapella::No - } else { - ReceivedPreCapella::Yes - }; - if matches!(received_pre_capella, ReceivedPreCapella::No) { - publish_pubsub_message( - &network_tx, - PubsubMessage::BlsToExecutionChange(Box::new( - verified_address_change.as_inner().clone(), - )), - )?; - } - - // Import to op pool (may return `false` if there's a race). 
- let imported = chain.import_bls_to_execution_change( - verified_address_change, - received_pre_capella, - ); - - info!( - %validator_index, - ?address, - published = - matches!(received_pre_capella, ReceivedPreCapella::No), - imported, - "Processed BLS to execution change" - ); - } - Ok(ObservationOutcome::AlreadyKnown) => { - debug!(%validator_index, "BLS to execution change already known"); - } - Err(e) => { - warn!( - validator_index, - reason = ?e, - source = "HTTP", - "Invalid BLS to execution change" - ); - failures.push(api_types::Failure::new( - index, - format!("invalid: {e:?}"), - )); - } - } - } - - if failures.is_empty() { - Ok(()) - } else { - Err(warp_utils::reject::indexed_bad_request( - "some BLS to execution changes failed to verify".into(), - failures, - )) - } - }) - }, - ); - - let beacon_rewards_path = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("rewards")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - // GET beacon/rewards/blocks/{block_id} - let get_beacon_rewards_blocks = beacon_rewards_path - .clone() - .and(warp::path("blocks")) - .and(block_id_or_err) - .and(warp::path::end()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - block_id: BlockId| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (rewards, execution_optimistic, finalized) = - standard_block_rewards::compute_beacon_block_rewards(chain, block_id)?; - Ok(api_types::GenericResponse::from(rewards)).map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }, - ); - - /* - * builder/states - */ - - let builder_states_path = eth_v1 - .and(warp::path("builder")) - .and(warp::path("states")) - .and(chain_filter.clone()); - - // GET builder/states/{state_id}/expected_withdrawals - let get_expected_withdrawals = builder_states_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path::param::()) - .and(warp::path("expected_withdrawals")) - .and(warp::query::()) - 
.and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |chain: Arc>, - task_spawner: TaskSpawner, - state_id: StateId, - query: api_types::ExpectedWithdrawalsQuery, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let (state, execution_optimistic, finalized) = state_id.state(&chain)?; - let proposal_slot = query.proposal_slot.unwrap_or(state.slot() + 1); - let withdrawals = - get_next_withdrawals::(&chain, state, state_id, proposal_slot)?; - - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(withdrawals.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => Ok(warp::reply::json( - &api_types::ExecutionOptimisticFinalizedResponse { - data: withdrawals, - execution_optimistic: Some(execution_optimistic), - finalized: Some(finalized), - }, - ) - .into_response()), - } - }) - }, - ); - - /* - * beacon/light_client - */ - - let beacon_light_client_path = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("light_client")) - .and(light_client_server_filter) - .and(chain_filter.clone()); - - // GET beacon/light_client/bootstrap/{block_root} - let get_beacon_light_client_bootstrap = beacon_light_client_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path("bootstrap")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid block root value".to_string(), - )) - })) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |light_client_server_enabled: Result<(), Rejection>, - chain: Arc>, - task_spawner: TaskSpawner, - block_root: Hash256, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - light_client_server_enabled?; - get_light_client_bootstrap::(chain, &block_root, accept_header) - 
}) - }, - ); - - // GET beacon/light_client/optimistic_update - let get_beacon_light_client_optimistic_update = beacon_light_client_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path("optimistic_update")) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |light_client_server_enabled: Result<(), Rejection>, - chain: Arc>, - task_spawner: TaskSpawner, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - light_client_server_enabled?; - let update = chain - .light_client_server_cache - .get_latest_optimistic_update() - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "No LightClientOptimisticUpdate is available".to_string(), - ) - })?; - - let fork_name = chain - .spec - .fork_name_at_slot::(update.get_slot()); - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(update.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => Ok(warp::reply::json(&beacon_response( - ResponseIncludesVersion::Yes(fork_name), - update, - )) - .into_response()), - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET beacon/light_client/finality_update - let get_beacon_light_client_finality_update = beacon_light_client_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path("finality_update")) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .then( - |light_client_server_enabled: Result<(), Rejection>, - chain: Arc>, - task_spawner: TaskSpawner, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - light_client_server_enabled?; - let update = chain - .light_client_server_cache - .get_latest_finality_update() - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "No 
LightClientFinalityUpdate is available".to_string(), - ) - })?; - - let fork_name = chain - .spec - .fork_name_at_slot::(update.signature_slot()); - match accept_header { - Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(update.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => Ok(warp::reply::json(&beacon_response( - ResponseIncludesVersion::Yes(fork_name), - update, - )) - .into_response()), - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET beacon/light_client/updates - let get_beacon_light_client_updates = beacon_light_client_path - .clone() - .and(task_spawner_filter.clone()) - .and(warp::path("updates")) - .and(warp::path::end()) - .and(warp::query::()) - .and(warp::header::optional::("accept")) - .then( - |light_client_server_enabled: Result<(), Rejection>, - chain: Arc>, - task_spawner: TaskSpawner, - query: LightClientUpdatesQuery, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - light_client_server_enabled?; - get_light_client_updates::(chain, query, accept_header) - }) - }, - ); - - /* - * beacon/rewards - */ - - let beacon_rewards_path = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("rewards")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()); - - // POST beacon/rewards/attestations/{epoch} - let post_beacon_rewards_attestations = beacon_rewards_path - .clone() - .and(warp::path("attestations")) - .and(warp::path::param::()) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - epoch: Epoch, - validators: Vec| { - task_spawner.blocking_json_task(Priority::P1, move || { - let attestation_rewards = chain - .compute_attestation_rewards(epoch, validators) - .map_err(|e| match e { - 
BeaconChainError::MissingBeaconState(root) => { - warp_utils::reject::custom_not_found(format!( - "missing state {root:?}", - )) - } - BeaconChainError::NoStateForSlot(slot) => { - warp_utils::reject::custom_not_found(format!( - "missing state at slot {slot}" - )) - } - BeaconChainError::BeaconStateError( - BeaconStateError::UnknownValidator(validator_index), - ) => warp_utils::reject::custom_bad_request(format!( - "validator is unknown: {validator_index}" - )), - BeaconChainError::ValidatorPubkeyUnknown(pubkey) => { - warp_utils::reject::custom_bad_request(format!( - "validator pubkey is unknown: {pubkey:?}" - )) - } - e => warp_utils::reject::custom_server_error(format!( - "unexpected error: {:?}", - e - )), - })?; - let execution_optimistic = - chain.is_optimistic_or_invalid_head().unwrap_or_default(); - - Ok(api_types::GenericResponse::from(attestation_rewards)) - .map(|resp| resp.add_execution_optimistic(execution_optimistic)) - }) - }, - ); - - // POST beacon/rewards/sync_committee/{block_id} - let post_beacon_rewards_sync_committee = beacon_rewards_path - .clone() - .and(warp::path("sync_committee")) - .and(block_id_or_err) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .then( - |task_spawner: TaskSpawner, - chain: Arc>, - block_id: BlockId, - validators: Vec| { - task_spawner.blocking_json_task(Priority::P1, move || { - let (rewards, execution_optimistic, finalized) = - sync_committee_rewards::compute_sync_committee_rewards( - chain, block_id, validators, - )?; - - Ok(api_types::GenericResponse::from(rewards)).map(|resp| { - resp.add_execution_optimistic_finalized(execution_optimistic, finalized) - }) - }) - }, - ); - - /* - * config - */ - - let config_path = eth_v1.and(warp::path("config")); - - // GET config/fork_schedule - let get_config_fork_schedule = config_path - .and(warp::path("fork_schedule")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: 
Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let forks = ForkName::list_all() - .into_iter() - .filter_map(|fork_name| chain.spec.fork_for_name(fork_name)) - .collect::>(); - Ok(api_types::GenericResponse::from(forks)) - }) - }, - ); - - // GET config/spec - let get_config_spec = config_path - .and(warp::path("spec")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - move |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let config_and_preset = - ConfigAndPreset::from_chain_spec::(&chain.spec); - Ok(api_types::GenericResponse::from(config_and_preset)) - }) - }, - ); - - // GET config/deposit_contract - let get_config_deposit_contract = config_path - .and(warp::path("deposit_contract")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - Ok(api_types::GenericResponse::from( - api_types::DepositContractData { - address: chain.spec.deposit_contract_address, - chain_id: chain.spec.deposit_chain_id, - }, - )) - }) - }, - ); - - /* - * debug - */ - - // GET debug/beacon/data_column_sidecars/{block_id} - let get_debug_data_column_sidecars = eth_v1 - .and(warp::path("debug")) - .and(warp::path("beacon")) - .and(warp::path("data_column_sidecars")) - .and(block_id_or_err) - .and(warp::path::end()) - .and(multi_key_query::()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp::header::optional::("accept")) - .then( - |block_id: BlockId, - indices_res: Result, - task_spawner: TaskSpawner, - chain: Arc>, - accept_header: Option| { - task_spawner.blocking_response_task(Priority::P1, move || { - let indices = indices_res?; - let (data_columns, fork_name, execution_optimistic, finalized) = - block_id.get_data_columns(indices, &chain)?; - - match accept_header { - 
Some(api_types::Accept::Ssz) => Response::builder() - .status(200) - .body(data_columns.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }), - _ => { - // Post as a V2 endpoint so we return the fork version. - let res = execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - &data_columns, - )?; - Ok(warp::reply::json(&res).into_response()) - } - } - .map(|resp| add_consensus_version_header(resp, fork_name)) - }) - }, - ); - - // GET debug/beacon/states/{state_id} - let get_debug_beacon_states = any_version - .and(warp::path("debug")) - .and(warp::path("beacon")) - .and(warp::path("states")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid state ID".to_string(), - )) - })) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |_endpoint_version: EndpointVersion, - state_id: StateId, - accept_header: Option, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P1, move || match accept_header { - Some(api_types::Accept::Ssz) => { - // We can ignore the optimistic status for the "fork" since it's a - // specification constant that doesn't change across competing heads of the - // beacon chain. 
- let t = std::time::Instant::now(); - let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; - let fork_name = state - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let timer = metrics::start_timer(&metrics::HTTP_API_STATE_SSZ_ENCODE_TIMES); - let response_bytes = state.as_ssz_bytes(); - drop(timer); - debug!( - total_time_ms = t.elapsed().as_millis(), - target_slot = %state.slot(), - "HTTP state load" - ); - - Response::builder() - .status(200) - .body(response_bytes.into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map(|resp: warp::reply::Response| { - add_consensus_version_header(resp, fork_name) - }) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }) - } - _ => state_id.map_state_and_execution_optimistic_and_finalized( - &chain, - |state, execution_optimistic, finalized| { - let fork_name = state - .fork_name(&chain.spec) - .map_err(inconsistent_fork_rejection)?; - let res = execution_optimistic_finalized_beacon_response( - ResponseIncludesVersion::Yes(fork_name), - execution_optimistic, - finalized, - &state, - )?; - Ok(add_consensus_version_header( - warp::reply::json(&res).into_response(), - fork_name, - )) - }, - ), - }) - }, - ); - - // GET debug/beacon/heads - let get_debug_beacon_heads = any_version - .and(warp::path("debug")) - .and(warp::path("beacon")) - .and(warp::path("heads")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |endpoint_version: EndpointVersion, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let heads = chain - .heads() - .into_iter() - .map(|(root, slot)| { - let execution_optimistic = if endpoint_version == V1 { - None - } else if endpoint_version == V2 { - chain - .canonical_head - .fork_choice_read_lock() - .is_optimistic_or_invalid_block(&root) - .ok() - } else { - return 
Err(unsupported_version_rejection(endpoint_version)); - }; - Ok(api_types::ChainHeadData { - slot, - root, - execution_optimistic, - }) - }) - .collect::, warp::Rejection>>(); - Ok(api_types::GenericResponse::from(heads?)) - }) - }, - ); - - // GET debug/fork_choice - let get_debug_fork_choice = eth_v1 - .and(warp::path("debug")) - .and(warp::path("fork_choice")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); - - let proto_array = beacon_fork_choice.proto_array().core_proto_array(); - - let fork_choice_nodes = proto_array - .nodes - .iter() - .map(|node| { - let execution_status = if node.execution_status.is_execution_enabled() { - Some(node.execution_status.to_string()) - } else { - None - }; - - ForkChoiceNode { - slot: node.slot, - block_root: node.root, - parent_root: node - .parent - .and_then(|index| proto_array.nodes.get(index)) - .map(|parent| parent.root), - justified_epoch: node.justified_checkpoint.epoch, - finalized_epoch: node.finalized_checkpoint.epoch, - weight: node.weight, - validity: execution_status, - execution_block_hash: node - .execution_status - .block_hash() - .map(|block_hash| block_hash.into_root()), - } - }) - .collect::>(); - Ok(ForkChoice { - justified_checkpoint: proto_array.justified_checkpoint, - finalized_checkpoint: proto_array.finalized_checkpoint, - fork_choice_nodes, - }) - }) - }, - ); - - /* - * node - */ - - // GET node/identity - let get_node_identity = eth_v1 - .and(warp::path("node")) - .and(warp::path("identity")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(network_globals.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, - network_globals: Arc>, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let enr = 
network_globals.local_enr(); - let p2p_addresses = enr.multiaddr_p2p_tcp(); - let discovery_addresses = enr.multiaddr_p2p_udp(); - Ok(api_types::GenericResponse::from(api_types::IdentityData { - peer_id: network_globals.local_peer_id().to_base58(), - enr, - p2p_addresses, - discovery_addresses, - metadata: from_meta_data::( - &network_globals.local_metadata, - &chain.spec, - ), - })) - }) - }, - ); - - // GET node/version - let get_node_version = eth_v1 - .and(warp::path("node")) - .and(warp::path("version")) - .and(warp::path::end()) - // Bypass the `task_spawner` since this method returns a static string. - .then(|| async { - warp::reply::json(&api_types::GenericResponse::from(api_types::VersionData { - version: version_with_platform(), - })) - .into_response() - }); - - // GET node/syncing - let get_node_syncing = eth_v1 - .and(warp::path("node")) - .and(warp::path("syncing")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(network_globals.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, - network_globals: Arc>, - chain: Arc>| { - async move { - let el_offline = if let Some(el) = &chain.execution_layer { - el.is_offline_or_erroring().await - } else { - true - }; - - task_spawner - .blocking_json_task(Priority::P0, move || { - let (head, head_execution_status) = chain - .canonical_head - .head_and_execution_status() - .map_err(warp_utils::reject::unhandled_error)?; - let head_slot = head.head_slot(); - let current_slot = - chain.slot_clock.now_or_genesis().ok_or_else(|| { - warp_utils::reject::custom_server_error( - "Unable to read slot clock".into(), - ) - })?; - - // Taking advantage of saturating subtraction on slot. - let sync_distance = current_slot - head_slot; - - let is_optimistic = head_execution_status.is_optimistic_or_invalid(); - - // When determining sync status, make an exception for single-node - // testnets with 0 peers. 
- let sync_state = network_globals.sync_state.read(); - let is_synced = sync_state.is_synced() - || (sync_state.is_stalled() - && network_globals.config.target_peers == 0); - drop(sync_state); - - let syncing_data = api_types::SyncingData { - is_syncing: !is_synced, - is_optimistic, - el_offline, - head_slot, - sync_distance, - }; - - Ok(api_types::GenericResponse::from(syncing_data)) - }) - .await - } + }) }, ); - // GET node/health - let get_node_health = eth_v1 - .and(warp::path("node")) - .and(warp::path("health")) - .and(warp::path::end()) + // GET beacon/light_client/updates + let get_beacon_light_client_updates = beacon_light_client_path + .clone() .and(task_spawner_filter.clone()) - .and(network_globals.clone()) - .and(chain_filter.clone()) + .and(warp::path("updates")) + .and(warp::path::end()) + .and(warp::query::()) + .and(warp::header::optional::("accept")) .then( - |task_spawner: TaskSpawner, - network_globals: Arc>, - chain: Arc>| { - async move { - let el_offline = if let Some(el) = &chain.execution_layer { - el.is_offline_or_erroring().await - } else { - true - }; + |light_client_server_enabled: Result<(), Rejection>, + chain: Arc>, + task_spawner: TaskSpawner, + query: LightClientUpdatesQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + light_client_server_enabled?; + get_light_client_updates::(chain, query, accept_header) + }) + }, + ); - task_spawner - .blocking_response_task(Priority::P0, move || { - let is_optimistic = chain - .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::unhandled_error)?; + /* + * beacon/rewards + */ - let is_syncing = !network_globals.sync_state.read().is_synced(); + let beacon_rewards_path = eth_v1 + .clone() + .and(warp::path("beacon")) + .and(warp::path("rewards")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()); - if el_offline { - Err(warp_utils::reject::not_synced( - "execution layer is offline".to_string(), + // POST 
beacon/rewards/attestations/{epoch} + let post_beacon_rewards_attestations = beacon_rewards_path + .clone() + .and(warp::path("attestations")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + epoch: Epoch, + validators: Vec| { + task_spawner.blocking_json_task(Priority::P1, move || { + let attestation_rewards = chain + .compute_attestation_rewards(epoch, validators) + .map_err(|e| match e { + BeaconChainError::MissingBeaconState(root) => { + warp_utils::reject::custom_not_found(format!( + "missing state {root:?}", )) - } else if is_syncing || is_optimistic { - Ok(warp::reply::with_status( - warp::reply(), - warp::http::StatusCode::PARTIAL_CONTENT, + } + BeaconChainError::NoStateForSlot(slot) => { + warp_utils::reject::custom_not_found(format!( + "missing state at slot {slot}" )) - } else { - Ok(warp::reply::with_status( - warp::reply(), - warp::http::StatusCode::OK, + } + BeaconChainError::BeaconStateError( + BeaconStateError::UnknownValidator(validator_index), + ) => warp_utils::reject::custom_bad_request(format!( + "validator is unknown: {validator_index}" + )), + BeaconChainError::ValidatorPubkeyUnknown(pubkey) => { + warp_utils::reject::custom_bad_request(format!( + "validator pubkey is unknown: {pubkey:?}" )) } - }) - .await - } + e => warp_utils::reject::custom_server_error(format!( + "unexpected error: {:?}", + e + )), + })?; + let execution_optimistic = + chain.is_optimistic_or_invalid_head().unwrap_or_default(); + + Ok(api_types::GenericResponse::from(attestation_rewards)) + .map(|resp| resp.add_execution_optimistic(execution_optimistic)) + }) }, ); - // GET node/peers/{peer_id} - let get_node_peers_by_id = eth_v1 - .and(warp::path("node")) - .and(warp::path("peers")) - .and(warp::path::param::()) + // POST beacon/rewards/sync_committee/{block_id} + let post_beacon_rewards_sync_committee = beacon_rewards_path + .clone() + .and(warp::path("sync_committee")) + 
.and(block_id_or_err) .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(network_globals.clone()) + .and(warp_utils::json::json()) .then( - |requested_peer_id: String, - task_spawner: TaskSpawner, - network_globals: Arc>| { + |task_spawner: TaskSpawner, + chain: Arc>, + block_id: BlockId, + validators: Vec| { task_spawner.blocking_json_task(Priority::P1, move || { - let peer_id = PeerId::from_bytes( - &bs58::decode(requested_peer_id.as_str()) - .into_vec() - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "invalid peer id: {}", - e - )) - })?, - ) - .map_err(|_| { - warp_utils::reject::custom_bad_request("invalid peer id.".to_string()) - })?; - - if let Some(peer_info) = network_globals.peers.read().peer_info(&peer_id) { - let address = if let Some(multiaddr) = peer_info.seen_multiaddrs().next() { - multiaddr.to_string() - } else if let Some(addr) = peer_info.listening_addresses().first() { - addr.to_string() - } else { - String::new() - }; + let (rewards, execution_optimistic, finalized) = + sync_committee_rewards::compute_sync_committee_rewards( + chain, block_id, validators, + )?; - // the eth2 API spec implies only peers we have been connected to at some point should be included. 
- if let Some(&dir) = peer_info.connection_direction() { - return Ok(api_types::GenericResponse::from(api_types::PeerData { - peer_id: peer_id.to_string(), - enr: peer_info.enr().map(|enr| enr.to_base64()), - last_seen_p2p_address: address, - direction: dir.into(), - state: peer_info.connection_status().clone().into(), - })); - } - } - Err(warp_utils::reject::custom_not_found( - "peer not found.".to_string(), - )) + Ok(api_types::GenericResponse::from(rewards)).map(|resp| { + resp.add_execution_optimistic_finalized(execution_optimistic, finalized) + }) }) }, ); - // GET node/peers - let get_node_peers = eth_v1 - .and(warp::path("node")) - .and(warp::path("peers")) + /* + * config + */ + + let config_path = eth_v1.clone().and(warp::path("config")); + + // GET config/fork_schedule + let get_config_fork_schedule = config_path + .clone() + .and(warp::path("fork_schedule")) .and(warp::path::end()) - .and(multi_key_query::()) .and(task_spawner_filter.clone()) - .and(network_globals.clone()) + .and(chain_filter.clone()) .then( - |query_res: Result, - task_spawner: TaskSpawner, - network_globals: Arc>| { + |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { - let query = query_res?; - let mut peers: Vec = Vec::new(); - network_globals - .peers - .read() - .peers() - .for_each(|(peer_id, peer_info)| { - let address = - if let Some(multiaddr) = peer_info.seen_multiaddrs().next() { - multiaddr.to_string() - } else if let Some(addr) = peer_info.listening_addresses().first() { - addr.to_string() - } else { - String::new() - }; - - // the eth2 API spec implies only peers we have been connected to at some point should be included. 
- if let Some(&dir) = peer_info.connection_direction() { - let direction = dir.into(); - let state = peer_info.connection_status().clone().into(); - - let state_matches = query - .state - .as_ref() - .is_none_or(|states| states.contains(&state)); - let direction_matches = query - .direction - .as_ref() - .is_none_or(|directions| directions.contains(&direction)); + let forks = ForkName::list_all() + .into_iter() + .filter_map(|fork_name| chain.spec.fork_for_name(fork_name)) + .collect::>(); + Ok(api_types::GenericResponse::from(forks)) + }) + }, + ); - if state_matches && direction_matches { - peers.push(api_types::PeerData { - peer_id: peer_id.to_string(), - enr: peer_info.enr().map(|enr| enr.to_base64()), - last_seen_p2p_address: address, - direction, - state, - }); - } - } - }); - Ok(api_types::PeersData { - meta: api_types::PeersMetaData { - count: peers.len() as u64, - }, - data: peers, - }) + // GET config/spec + let get_config_spec = config_path + .clone() + .and(warp::path("spec")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + move |task_spawner: TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let config_and_preset = + ConfigAndPreset::from_chain_spec::(&chain.spec); + Ok(api_types::GenericResponse::from(config_and_preset)) }) }, ); - // GET node/peer_count - let get_node_peer_count = eth_v1 - .and(warp::path("node")) - .and(warp::path("peer_count")) + // GET config/deposit_contract + let get_config_deposit_contract = config_path + .and(warp::path("deposit_contract")) .and(warp::path::end()) .and(task_spawner_filter.clone()) - .and(network_globals.clone()) + .and(chain_filter.clone()) .then( - |task_spawner: TaskSpawner, - network_globals: Arc>| { + |task_spawner: TaskSpawner, chain: Arc>| { task_spawner.blocking_json_task(Priority::P1, move || { - let mut connected: u64 = 0; - let mut connecting: u64 = 0; - let mut disconnected: u64 = 0; - let mut disconnecting: 
u64 = 0; - - network_globals - .peers - .read() - .peers() - .for_each(|(_, peer_info)| { - let state = - api_types::PeerState::from(peer_info.connection_status().clone()); - match state { - api_types::PeerState::Connected => connected += 1, - api_types::PeerState::Connecting => connecting += 1, - api_types::PeerState::Disconnected => disconnected += 1, - api_types::PeerState::Disconnecting => disconnecting += 1, - } - }); - - Ok(api_types::GenericResponse::from(api_types::PeerCount { - connected, - connecting, - disconnected, - disconnecting, - })) + Ok(api_types::GenericResponse::from( + api_types::DepositContractData { + address: chain.spec.deposit_contract_address, + chain_id: chain.spec.deposit_chain_id, + }, + )) }) }, ); + /* - * validator + * debug */ - // GET validator/duties/proposer/{epoch} - let get_validator_duties_proposer = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("proposer")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) + // GET debug/beacon/data_column_sidecars/{block_id} + let get_debug_data_column_sidecars = eth_v1 + .clone() + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("data_column_sidecars")) + .and(block_id_or_err) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) + .and(multi_key_query::()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) + .and(warp::header::optional::("accept")) .then( - |epoch: Epoch, - not_synced_filter: Result<(), Rejection>, + |block_id: BlockId, + indices_res: Result, task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - proposer_duties::proposer_duties(epoch, &chain) + chain: Arc>, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + let indices = indices_res?; + let (data_columns, fork_name, execution_optimistic, 
finalized) = + block_id.get_data_columns(indices, &chain)?; + + match accept_header { + Some(api_types::Accept::Ssz) => Response::builder() + .status(200) + .body(data_columns.as_ssz_bytes().into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }), + _ => { + // Post as a V2 endpoint so we return the fork version. + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + &data_columns, + )?; + Ok(warp::reply::json(&res).into_response()) + } + } + .map(|resp| add_consensus_version_header(resp, fork_name)) }) }, ); - // GET validator/blocks/{slot} - let get_validator_blocks = any_version - .and(warp::path("validator")) - .and(warp::path("blocks")) - .and(warp::path::param::().or_else(|_| async { + // GET debug/beacon/states/{state_id} + let get_debug_beacon_states = any_version + .clone() + .and(warp::path("debug")) + .and(warp::path("beacon")) + .and(warp::path("states")) + .and(warp::path::param::().or_else(|_| async { Err(warp_utils::reject::custom_bad_request( - "Invalid slot".to_string(), + "Invalid state ID".to_string(), )) })) .and(warp::path::end()) .and(warp::header::optional::("accept")) - .and(not_while_syncing_filter.clone()) - .and(warp::query::()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |endpoint_version: EndpointVersion, - slot: Slot, + |_endpoint_version: EndpointVersion, + state_id: StateId, accept_header: Option, - not_synced_filter: Result<(), Rejection>, - query: api_types::ValidatorBlocksQuery, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - debug!(?slot, "Block production request from HTTP API"); - - not_synced_filter?; + task_spawner.blocking_response_task(Priority::P1, move || match accept_header { + Some(api_types::Accept::Ssz) => { + // We can 
ignore the optimistic status for the "fork" since it's a + // specification constant that doesn't change across competing heads of the + // beacon chain. + let t = std::time::Instant::now(); + let (state, _execution_optimistic, _finalized) = state_id.state(&chain)?; + let fork_name = state + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let timer = metrics::start_timer(&metrics::HTTP_API_STATE_SSZ_ENCODE_TIMES); + let response_bytes = state.as_ssz_bytes(); + drop(timer); + debug!( + total_time_ms = t.elapsed().as_millis(), + target_slot = %state.slot(), + "HTTP state load" + ); - if endpoint_version == V3 { - produce_block_v3(accept_header, chain, slot, query).await - } else { - produce_block_v2(accept_header, chain, slot, query).await + Response::builder() + .status(200) + .body(response_bytes.into()) + .map(|res: Response| add_ssz_content_type_header(res)) + .map(|resp: warp::reply::Response| { + add_consensus_version_header(resp, fork_name) + }) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) } + _ => state_id.map_state_and_execution_optimistic_and_finalized( + &chain, + |state, execution_optimistic, finalized| { + let fork_name = state + .fork_name(&chain.spec) + .map_err(inconsistent_fork_rejection)?; + let res = execution_optimistic_finalized_beacon_response( + ResponseIncludesVersion::Yes(fork_name), + execution_optimistic, + finalized, + &state, + )?; + Ok(add_consensus_version_header( + warp::reply::json(&res).into_response(), + fork_name, + )) + }, + ), }) }, ); - // GET validator/blinded_blocks/{slot} - let get_validator_blinded_blocks = eth_v1 - .and(warp::path("validator")) - .and(warp::path("blinded_blocks")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid slot".to_string(), - )) - })) + // GET debug/beacon/heads + let get_debug_beacon_heads = any_version + .clone() + .and(warp::path("debug")) + 
.and(warp::path("beacon")) + .and(warp::path("heads")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp::query::()) - .and(warp::header::optional::("accept")) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |slot: Slot, - not_synced_filter: Result<(), Rejection>, - query: api_types::ValidatorBlocksQuery, - accept_header: Option, + |endpoint_version: EndpointVersion, task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - not_synced_filter?; - produce_blinded_block_v2(accept_header, chain, slot, query).await + task_spawner.blocking_json_task(Priority::P1, move || { + let heads = chain + .heads() + .into_iter() + .map(|(root, slot)| { + let execution_optimistic = if endpoint_version == V1 { + None + } else if endpoint_version == V2 { + chain + .canonical_head + .fork_choice_read_lock() + .is_optimistic_or_invalid_block(&root) + .ok() + } else { + return Err(unsupported_version_rejection(endpoint_version)); + }; + Ok(api_types::ChainHeadData { + slot, + root, + execution_optimistic, + }) + }) + .collect::, warp::Rejection>>(); + Ok(api_types::GenericResponse::from(heads?)) }) }, ); - // GET validator/attestation_data?slot,committee_index - let get_validator_attestation_data = eth_v1 - .and(warp::path("validator")) - .and(warp::path("attestation_data")) + // GET debug/fork_choice + let get_debug_fork_choice = eth_v1 + .clone() + .and(warp::path("debug")) + .and(warp::path("fork_choice")) .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) .and(chain_filter.clone()) .then( - |query: api_types::ValidatorAttestationDataQuery, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - - let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; + |task_spawner: 
TaskSpawner, chain: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let beacon_fork_choice = chain.canonical_head.fork_choice_read_lock(); - // allow a tolerance of one slot to account for clock skew - if query.slot > current_slot + 1 { - return Err(warp_utils::reject::custom_bad_request(format!( - "request slot {} is more than one slot past the current slot {}", - query.slot, current_slot - ))); - } + let proto_array = beacon_fork_choice.proto_array().core_proto_array(); - chain - .produce_unaggregated_attestation(query.slot, query.committee_index) - .map(|attestation| attestation.data().clone()) - .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::unhandled_error) - }) - }, - ); + let fork_choice_nodes = proto_array + .nodes + .iter() + .map(|node| { + let execution_status = if node.execution_status.is_execution_enabled() { + Some(node.execution_status.to_string()) + } else { + None + }; - // GET validator/aggregate_attestation?attestation_data_root,slot - let get_validator_aggregate_attestation = any_version - .and(warp::path("validator")) - .and(warp::path("aggregate_attestation")) - .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |endpoint_version: EndpointVersion, - query: api_types::ValidatorAggregateAttestationQuery, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_response_task(Priority::P0, move || { - not_synced_filter?; - crate::aggregate_attestation::get_aggregate_attestation( - query.slot, - &query.attestation_data_root, - query.committee_index, - endpoint_version, - chain, - ) + ForkChoiceNode { + slot: node.slot, + block_root: node.root, + parent_root: node + .parent + .and_then(|index| proto_array.nodes.get(index)) + .map(|parent| parent.root), + justified_epoch: node.justified_checkpoint.epoch, + finalized_epoch: 
node.finalized_checkpoint.epoch, + weight: node.weight, + validity: execution_status, + execution_block_hash: node + .execution_status + .block_hash() + .map(|block_hash| block_hash.into_root()), + extra_data: ForkChoiceExtraData { + target_root: node.target_root, + justified_root: node.justified_checkpoint.root, + finalized_root: node.finalized_checkpoint.root, + unrealized_justified_root: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_finalized_root: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_justified_epoch: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.epoch), + unrealized_finalized_epoch: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.epoch), + execution_status: node.execution_status.to_string(), + best_child: node + .best_child + .and_then(|index| proto_array.nodes.get(index)) + .map(|child| child.root), + best_descendant: node + .best_descendant + .and_then(|index| proto_array.nodes.get(index)) + .map(|descendant| descendant.root), + }, + } + }) + .collect::>(); + Ok(ForkChoice { + justified_checkpoint: beacon_fork_choice.justified_checkpoint(), + finalized_checkpoint: beacon_fork_choice.finalized_checkpoint(), + fork_choice_nodes, + }) }) }, ); - // POST validator/duties/attester/{epoch} - let post_validator_duties_attester = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("attester")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) + /* + * node + */ + + // GET node/identity + let get_node_identity = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("identity")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp_utils::json::json()) .and(task_spawner_filter.clone()) + .and(network_globals.clone()) .and(chain_filter.clone()) .then( - |epoch: Epoch, - 
not_synced_filter: Result<(), Rejection>, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, + |task_spawner: TaskSpawner, + network_globals: Arc>, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - attester_duties::attester_duties(epoch, &indices.0, &chain) + task_spawner.blocking_json_task(Priority::P1, move || { + let enr = network_globals.local_enr(); + let p2p_addresses = enr.multiaddr_p2p_tcp(); + let discovery_addresses = enr.multiaddr_p2p_udp(); + Ok(api_types::GenericResponse::from(api_types::IdentityData { + peer_id: network_globals.local_peer_id().to_base58(), + enr: enr.to_base64(), + p2p_addresses: p2p_addresses.iter().map(|a| a.to_string()).collect(), + discovery_addresses: discovery_addresses + .iter() + .map(|a| a.to_string()) + .collect(), + metadata: utils::from_meta_data::( + &network_globals.local_metadata, + &chain.spec, + ), + })) }) }, ); - // POST validator/duties/sync/{epoch} - let post_validator_duties_sync = eth_v1 - .and(warp::path("validator")) - .and(warp::path("duties")) - .and(warp::path("sync")) - .and(warp::path::param::().or_else(|_| async { - Err(warp_utils::reject::custom_bad_request( - "Invalid epoch".to_string(), - )) - })) + // GET node/version + let get_node_version = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("version")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |epoch: Epoch, - not_synced_filter: Result<(), Rejection>, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - sync_committees::sync_committee_duties(epoch, &indices.0, &chain) - }) - }, - ); + // Bypass the `task_spawner` since this method returns a static string. 
+ .then(|| async { + warp::reply::json(&api_types::GenericResponse::from(api_types::VersionData { + version: version_with_platform(), + })) + .into_response() + }); - // GET validator/sync_committee_contribution - let get_validator_sync_committee_contribution = eth_v1 - .and(warp::path("validator")) - .and(warp::path("sync_committee_contribution")) + // GET node/syncing + let get_node_syncing = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("syncing")) .and(warp::path::end()) - .and(warp::query::()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) + .and(network_globals.clone()) .and(chain_filter.clone()) .then( - |sync_committee_data: SyncContributionData, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, + |task_spawner: TaskSpawner, + network_globals: Arc>, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - chain - .get_aggregated_sync_committee_contribution(&sync_committee_data) - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "unable to fetch sync contribution: {:?}", - e - )) - })? - .map(api_types::GenericResponse::from) - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "no matching sync contribution found".to_string(), - ) + async move { + let el_offline = if let Some(el) = &chain.execution_layer { + el.is_offline_or_erroring().await + } else { + true + }; + + task_spawner + .blocking_json_task(Priority::P0, move || { + let (head, head_execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::unhandled_error)?; + let head_slot = head.head_slot(); + let current_slot = + chain.slot_clock.now_or_genesis().ok_or_else(|| { + warp_utils::reject::custom_server_error( + "Unable to read slot clock".into(), + ) + })?; + + // Taking advantage of saturating subtraction on slot. 
+ let sync_distance = current_slot - head_slot; + + let is_optimistic = head_execution_status.is_optimistic_or_invalid(); + + // When determining sync status, make an exception for single-node + // testnets with 0 peers. + let sync_state = network_globals.sync_state.read(); + let is_synced = sync_state.is_synced() + || (sync_state.is_stalled() + && network_globals.config.target_peers == 0); + drop(sync_state); + + let syncing_data = api_types::SyncingData { + is_syncing: !is_synced, + is_optimistic, + el_offline, + head_slot, + sync_distance, + }; + + Ok(api_types::GenericResponse::from(syncing_data)) }) - }) + .await + } }, ); - // POST validator/aggregate_and_proofs - let post_validator_aggregate_and_proofs = any_version - .and(warp::path("validator")) - .and(warp::path("aggregate_and_proofs")) + // GET node/health + let get_node_health = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("health")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) + .and(network_globals.clone()) .and(chain_filter.clone()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) .then( - // V1 and V2 are identical except V2 has a consensus version header in the request. - // We only require this header for SSZ deserialization, which isn't supported for - // this endpoint presently. 
- |_endpoint_version: EndpointVersion, - not_synced_filter: Result<(), Rejection>, - task_spawner: TaskSpawner, - chain: Arc>, - aggregates: Vec>, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - let seen_timestamp = timestamp_now(); - let mut verified_aggregates = Vec::with_capacity(aggregates.len()); - let mut messages = Vec::with_capacity(aggregates.len()); - let mut failures = Vec::new(); - - // Verify that all messages in the post are valid before processing further - for (index, aggregate) in aggregates.iter().enumerate() { - match chain.verify_aggregated_attestation_for_gossip(aggregate) { - Ok(verified_aggregate) => { - messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new( - verified_aggregate.aggregate().clone(), - ))); - - // Notify the validator monitor. - chain - .validator_monitor - .read() - .register_api_aggregated_attestation( - seen_timestamp, - verified_aggregate.aggregate(), - verified_aggregate.indexed_attestation(), - &chain.slot_clock, - ); - - verified_aggregates.push((index, verified_aggregate)); - } - // If we already know the attestation, don't broadcast it or attempt to - // further verify it. Return success. - // - // It's reasonably likely that two different validators produce - // identical aggregates, especially if they're using the same beacon - // node. - Err(AttnError::AttestationSupersetKnown(_)) => continue, - // If we've already seen this aggregator produce an aggregate, just - // skip this one. - // - // We're likely to see this with VCs that use fallback BNs. The first - // BN might time-out *after* publishing the aggregate and then the - // second BN will indicate it's already seen the aggregate. - // - // There's no actual error for the user or the network since the - // aggregate has been successfully published by some other node. 
- Err(AttnError::AggregatorAlreadyKnown(_)) => continue, - Err(e) => { - error!( - error = ?e, - request_index = index, - aggregator_index = aggregate.message().aggregator_index(), - attestation_index = aggregate.message().aggregate().committee_index(), - attestation_slot = %aggregate.message().aggregate().data().slot, - "Failure verifying aggregate and proofs" - ); - failures.push(api_types::Failure::new(index, format!("Verification: {:?}", e))); - } - } - } + |task_spawner: TaskSpawner, + network_globals: Arc>, + chain: Arc>| { + async move { + let el_offline = if let Some(el) = &chain.execution_layer { + el.is_offline_or_erroring().await + } else { + true + }; - // Publish aggregate attestations to the libp2p network - if !messages.is_empty() { - publish_network_message(&network_tx, NetworkMessage::Publish { messages })?; - } + task_spawner + .blocking_response_task(Priority::P0, move || { + let is_optimistic = chain + .is_optimistic_or_invalid_head() + .map_err(warp_utils::reject::unhandled_error)?; - // Import aggregate attestations - for (index, verified_aggregate) in verified_aggregates { - if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) { - error!( - error = ?e, - request_index = index, - aggregator_index = verified_aggregate.aggregate().message().aggregator_index(), - attestation_index = verified_aggregate.attestation().committee_index(), - attestation_slot = %verified_aggregate.attestation().data().slot, - "Failure applying verified aggregate attestation to fork choice" - ); - failures.push(api_types::Failure::new(index, format!("Fork choice: {:?}", e))); - } - if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { - warn!( - error = ?e, - request_index = index, - "Could not add verified aggregate attestation to the inclusion pool" - ); - failures.push(api_types::Failure::new(index, format!("Op pool: {:?}", e))); - } - } + let is_syncing = !network_globals.sync_state.read().is_synced(); - if 
!failures.is_empty() { - Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(), - failures, - )) - } else { - Ok(()) - } - }) + if el_offline { + Err(warp_utils::reject::not_synced( + "execution layer is offline".to_string(), + )) + } else if is_syncing || is_optimistic { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::PARTIAL_CONTENT, + )) + } else { + Ok(warp::reply::with_status( + warp::reply(), + warp::http::StatusCode::OK, + )) + } + }) + .await + } }, ); - let post_validator_contribution_and_proofs = eth_v1 - .and(warp::path("validator")) - .and(warp::path("contribution_and_proofs")) + // GET node/peers/{peer_id} + let get_node_peers_by_id = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("peers")) + .and(warp::path::param::()) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) - .and(network_tx_filter.clone()) + .and(network_globals.clone()) .then( - |not_synced_filter: Result<(), Rejection>, + |requested_peer_id: String, task_spawner: TaskSpawner, - chain: Arc>, - contributions: Vec>, - network_tx: UnboundedSender>| { - task_spawner.blocking_json_task(Priority::P0, move || { - not_synced_filter?; - sync_committees::process_signed_contribution_and_proofs( - contributions, - network_tx, - &chain, - )?; - Ok(api_types::GenericResponse::from(())) - }) - }, - ); + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let peer_id = PeerId::from_bytes( + &bs58::decode(requested_peer_id.as_str()) + .into_vec() + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "invalid peer id: {}", + e + )) + })?, + ) + .map_err(|_| { + warp_utils::reject::custom_bad_request("invalid peer id.".to_string()) + })?; - // POST validator/beacon_committee_subscriptions - let post_validator_beacon_committee_subscriptions = eth_v1 - 
.and(warp::path("validator")) - .and(warp::path("beacon_committee_subscriptions")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(validator_subscription_tx_filter.clone()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |committee_subscriptions: Vec, - validator_subscription_tx: Sender, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - let subscriptions: std::collections::BTreeSet<_> = committee_subscriptions - .iter() - .map(|subscription| { - chain - .validator_monitor - .write() - .auto_register_local_validator(subscription.validator_index); - api_types::ValidatorSubscription { - attestation_committee_index: subscription.committee_index, - slot: subscription.slot, - committee_count_at_slot: subscription.committees_at_slot, - is_aggregator: subscription.is_aggregator, - } - }) - .collect(); + if let Some(peer_info) = network_globals.peers.read().peer_info(&peer_id) { + let address = if let Some(multiaddr) = peer_info.seen_multiaddrs().next() { + multiaddr.to_string() + } else if let Some(addr) = peer_info.listening_addresses().first() { + addr.to_string() + } else { + String::new() + }; - let message = - ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; - if let Err(e) = validator_subscription_tx.try_send(message) { - warn!( - info = "the host may be overloaded or resource-constrained", - error = ?e, - "Unable to process committee subscriptions" - ); - return Err(warp_utils::reject::custom_server_error( - "unable to queue subscription, host may be overloaded or shutting down" - .to_string(), - )); + // the eth2 API spec implies only peers we have been connected to at some point should be included. 
+ if let Some(&dir) = peer_info.connection_direction() { + return Ok(api_types::GenericResponse::from(api_types::PeerData { + peer_id: peer_id.to_string(), + enr: peer_info.enr().map(|enr| enr.to_base64()), + last_seen_p2p_address: address, + direction: dir.into(), + state: peer_info.connection_status().clone().into(), + })); + } } - Ok(()) + Err(warp_utils::reject::custom_not_found( + "peer not found.".to_string(), + )) }) }, ); - // POST validator/prepare_beacon_proposer - let post_validator_prepare_beacon_proposer = eth_v1 - .and(warp::path("validator")) - .and(warp::path("prepare_beacon_proposer")) + // GET node/peers + let get_node_peers = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("peers")) .and(warp::path::end()) - .and(not_while_syncing_filter.clone()) - .and(network_tx_filter.clone()) + .and(multi_key_query::()) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) + .and(network_globals.clone()) .then( - |not_synced_filter: Result<(), Rejection>, - network_tx: UnboundedSender>, + |query_res: Result, task_spawner: TaskSpawner, - chain: Arc>, - preparation_data: Vec| { - task_spawner.spawn_async_with_rejection(Priority::P0, async move { - not_synced_filter?; - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)?; - - let current_slot = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::unhandled_error)?; - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - - debug!( - count = preparation_data.len(), - "Received proposer preparation data" - ); + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let query = query_res?; + let mut peers: Vec = Vec::new(); + network_globals + .peers + .read() + .peers() + .for_each(|(peer_id, peer_info)| { + let address = + if let 
Some(multiaddr) = peer_info.seen_multiaddrs().next() { + multiaddr.to_string() + } else if let Some(addr) = peer_info.listening_addresses().first() { + addr.to_string() + } else { + String::new() + }; - execution_layer - .update_proposer_preparation( - current_epoch, - preparation_data.iter().map(|data| (data, &None)), - ) - .await; + // the eth2 API spec implies only peers we have been connected to at some point should be included. + if let Some(&dir) = peer_info.connection_direction() { + let direction = dir.into(); + let state = peer_info.connection_status().clone().into(); - chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; + let state_matches = query + .state + .as_ref() + .is_none_or(|states| states.contains(&state)); + let direction_matches = query + .direction + .as_ref() + .is_none_or(|directions| directions.contains(&direction)); - if chain.spec.is_peer_das_scheduled() { - let (finalized_beacon_state, _, _) = - StateId(CoreStateId::Finalized).state(&chain)?; - let validators_and_balances = preparation_data - .iter() - .filter_map(|preparation| { - if let Ok(effective_balance) = finalized_beacon_state - .get_effective_balance(preparation.validator_index as usize) - { - Some((preparation.validator_index as usize, effective_balance)) - } else { - None + if state_matches && direction_matches { + peers.push(api_types::PeerData { + peer_id: peer_id.to_string(), + enr: peer_info.enr().map(|enr| enr.to_base64()), + last_seen_p2p_address: address, + direction, + state, + }); } - }) - .collect::>(); - - let current_slot = - chain.slot().map_err(warp_utils::reject::unhandled_error)?; - if let Some(cgc_change) = chain - .data_availability_checker - .custody_context() - .register_validators(validators_and_balances, current_slot, &chain.spec) - { - chain.update_data_column_custody_info(Some( - cgc_change - .effective_epoch - 
.start_slot(T::EthSpec::slots_per_epoch()), - )); - - network_tx.send(NetworkMessage::CustodyCountChanged { - new_custody_group_count: cgc_change.new_custody_group_count, - sampling_count: cgc_change.sampling_count, - }).unwrap_or_else(|e| { - debug!(error = %e, "Could not send message to the network service. \ - Likely shutdown") - }); - } - } - - Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) + } + }); + Ok(api_types::PeersData { + meta: api_types::PeersMetaData { + count: peers.len() as u64, + }, + data: peers, + }) }) }, ); - // POST validator/register_validator - let post_validator_register_validator = eth_v1 - .and(warp::path("validator")) - .and(warp::path("register_validator")) + // GET node/peer_count + let get_node_peer_count = eth_v1 + .clone() + .and(warp::path("node")) + .and(warp::path("peer_count")) .and(warp::path::end()) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .and(warp_utils::json::json()) + .and(network_globals.clone()) .then( |task_spawner: TaskSpawner, - chain: Arc>, - register_val_data: Vec| async { - let (tx, rx) = oneshot::channel(); - - let initial_result = task_spawner - .spawn_async_with_rejection_no_conversion(Priority::P0, async move { - let execution_layer = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)?; - let current_slot = chain - .slot_clock - .now_or_genesis() - .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::unhandled_error)?; - let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); - - debug!( - count = register_val_data.len(), - "Received register validator request" - ); + network_globals: Arc>| { + task_spawner.blocking_json_task(Priority::P1, move || { + let mut connected: u64 = 0; + let mut connecting: u64 = 0; + let mut disconnected: u64 = 0; + let mut disconnecting: u64 = 0; - let head_snapshot = chain.head_snapshot(); - let spec = &chain.spec; 
+ network_globals + .peers + .read() + .peers() + .for_each(|(_, peer_info)| { + let state = + api_types::PeerState::from(peer_info.connection_status().clone()); + match state { + api_types::PeerState::Connected => connected += 1, + api_types::PeerState::Connecting => connecting += 1, + api_types::PeerState::Disconnected => disconnected += 1, + api_types::PeerState::Disconnecting => disconnecting += 1, + } + }); - let (preparation_data, filtered_registration_data): ( - Vec<(ProposerPreparationData, Option)>, - Vec, - ) = register_val_data - .into_iter() - .filter_map(|register_data| { - chain - .validator_index(®ister_data.message.pubkey) - .ok() - .flatten() - .and_then(|validator_index| { - let validator = head_snapshot - .beacon_state - .get_validator(validator_index) - .ok()?; - let validator_status = ValidatorStatus::from_validator( - validator, - current_epoch, - spec.far_future_epoch, - ) - .superstatus(); - let is_active_or_pending = - matches!(validator_status, ValidatorStatus::Pending) - || matches!( - validator_status, - ValidatorStatus::Active - ); - - // Filter out validators who are not 'active' or 'pending'. - is_active_or_pending.then_some({ - ( - ( - ProposerPreparationData { - validator_index: validator_index as u64, - fee_recipient: register_data - .message - .fee_recipient, - }, - Some(register_data.message.gas_limit), - ), - register_data, - ) - }) - }) - }) - .unzip(); + Ok(api_types::GenericResponse::from(api_types::PeerCount { + connected, + connecting, + disconnected, + disconnecting, + })) + }) + }, + ); + /* + * validator + */ - // Update the prepare beacon proposer cache based on this request. - execution_layer - .update_proposer_preparation( - current_epoch, - preparation_data.iter().map(|(data, limit)| (data, limit)), - ) - .await; - - // Call prepare beacon proposer blocking with the latest update in order to make - // sure we have a local payload to fall back to in the event of the blinded block - // flow failing. 
- chain - .prepare_beacon_proposer(current_slot) - .await - .map_err(|e| { - warp_utils::reject::custom_bad_request(format!( - "error updating proposer preparations: {:?}", - e - )) - })?; + // GET validator/duties/proposer/{epoch} + let get_validator_duties_proposer = get_validator_duties_proposer( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - info!( - count = filtered_registration_data.len(), - "Forwarding register validator request to connected builder" - ); + // GET validator/blocks/{slot} + let get_validator_blocks = get_validator_blocks( + any_version.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // It's a waste of a `BeaconProcessor` worker to just - // wait on a response from the builder (especially since - // they have frequent timeouts). Spawn a new task and - // send the response back to our original HTTP request - // task via a channel. - let builder_future = async move { - let arc_builder = chain - .execution_layer - .as_ref() - .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::unhandled_error)? - .builder(); - let builder = arc_builder - .as_ref() - .ok_or(BeaconChainError::BuilderMissing) - .map_err(warp_utils::reject::unhandled_error)?; - builder - .post_builder_validators(&filtered_registration_data) - .await - .map(|resp| warp::reply::json(&resp).into_response()) - .map_err(|e| { - warn!( - num_registrations = filtered_registration_data.len(), - error = ?e, - "Relay error when registering validator(s)" - ); - // Forward the HTTP status code if we are able to, otherwise fall back - // to a server error. 
- if let eth2::Error::ServerMessage(message) = e { - if message.code == StatusCode::BAD_REQUEST.as_u16() { - return warp_utils::reject::custom_bad_request( - message.message, - ); - } else { - // According to the spec this response should only be a 400 or 500, - // so we fall back to a 500 here. - return warp_utils::reject::custom_server_error( - message.message, - ); - } - } - warp_utils::reject::custom_server_error(format!("{e:?}")) - }) - }; - tokio::task::spawn(async move { tx.send(builder_future.await) }); + // GET validator/blinded_blocks/{slot} + let get_validator_blinded_blocks = get_validator_blinded_blocks( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // Just send a generic 200 OK from this closure. We'll - // ignore the `Ok` variant and form a proper response - // from what is sent back down the channel. - Ok(warp::reply::reply().into_response()) - }) - .await; + // GET validator/attestation_data?slot,committee_index + let get_validator_attestation_data = get_validator_attestation_data( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - if initial_result.is_err() { - return convert_rejection(initial_result).await; - } + // GET validator/aggregate_attestation?attestation_data_root,slot + let get_validator_aggregate_attestation = get_validator_aggregate_attestation( + any_version.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // Await a response from the builder without blocking a - // `BeaconProcessor` worker. 
- convert_rejection(rx.await.unwrap_or_else(|_| { - Ok(warp::reply::with_status( - warp::reply::json(&"No response from channel"), - eth2::StatusCode::INTERNAL_SERVER_ERROR, - ) - .into_response()) - })) - .await - }, - ); - // POST validator/sync_committee_subscriptions - let post_validator_sync_committee_subscriptions = eth_v1 - .and(warp::path("validator")) - .and(warp::path("sync_committee_subscriptions")) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(validator_subscription_tx_filter) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |subscriptions: Vec, - validator_subscription_tx: Sender, - task_spawner: TaskSpawner, - chain: Arc>, - | { - task_spawner.blocking_json_task(Priority::P0, move || { - for subscription in subscriptions { - chain - .validator_monitor - .write() - .auto_register_local_validator(subscription.validator_index); - - let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe { - subscriptions: vec![subscription], - }; - if let Err(e) = validator_subscription_tx.try_send(message) { - warn!( - info = "the host may be overloaded or resource-constrained", - error = ?e, - "Unable to process sync subscriptions" - ); - return Err(warp_utils::reject::custom_server_error( - "unable to queue subscription, host may be overloaded or shutting down".to_string(), - )); - } - } + // POST validator/duties/attester/{epoch} + let post_validator_duties_attester = post_validator_duties_attester( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - Ok(()) - }) - }, - ); + // POST validator/duties/sync/{epoch} + let post_validator_duties_sync = post_validator_duties_sync( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - // POST validator/liveness/{epoch} - let post_validator_liveness_epoch = eth_v1 - .and(warp::path("validator")) - .and(warp::path("liveness")) - 
.and(warp::path::param::()) - .and(warp::path::end()) - .and(warp_utils::json::json()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |epoch: Epoch, - indices: api_types::ValidatorIndexData, - task_spawner: TaskSpawner, - chain: Arc>| { - task_spawner.blocking_json_task(Priority::P0, move || { - // Ensure the request is for either the current, previous or next epoch. - let current_epoch = - chain.epoch().map_err(warp_utils::reject::unhandled_error)?; - let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); - let next_epoch = current_epoch.saturating_add(Epoch::new(1)); + // GET validator/sync_committee_contribution + let get_validator_sync_committee_contribution = get_validator_sync_committee_contribution( + eth_v1.clone().clone(), + chain_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - if epoch < prev_epoch || epoch > next_epoch { - return Err(warp_utils::reject::custom_bad_request(format!( - "request epoch {} is more than one epoch from the current epoch {}", - epoch, current_epoch - ))); - } + // POST validator/aggregate_and_proofs + let post_validator_aggregate_and_proofs = post_validator_aggregate_and_proofs( + any_version.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - let liveness: Vec = indices - .0 - .iter() - .cloned() - .map(|index| { - let is_live = chain.validator_seen_at_epoch(index as usize, epoch); - api_types::StandardLivenessResponseData { index, is_live } - }) - .collect(); + let post_validator_contribution_and_proofs = post_validator_contribution_and_proofs( + eth_v1.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); - Ok(api_types::GenericResponse::from(liveness)) - }) - }, + // POST validator/beacon_committee_subscriptions + let post_validator_beacon_committee_subscriptions = + 
post_validator_beacon_committee_subscriptions( + eth_v1.clone().clone(), + chain_filter.clone(), + validator_subscription_tx_filter.clone(), + task_spawner_filter.clone(), ); + // POST validator/prepare_beacon_proposer + let post_validator_prepare_beacon_proposer = post_validator_prepare_beacon_proposer( + eth_v1.clone().clone(), + chain_filter.clone(), + network_tx_filter.clone(), + not_while_syncing_filter.clone(), + task_spawner_filter.clone(), + ); + + // POST validator/register_validator + let post_validator_register_validator = post_validator_register_validator( + eth_v1.clone().clone(), + chain_filter.clone(), + task_spawner_filter.clone(), + ); + // POST validator/sync_committee_subscriptions + let post_validator_sync_committee_subscriptions = post_validator_sync_committee_subscriptions( + eth_v1.clone().clone(), + chain_filter.clone(), + validator_subscription_tx_filter.clone(), + task_spawner_filter.clone(), + ); + + // POST validator/liveness/{epoch} + let post_validator_liveness_epoch = post_validator_liveness_epoch( + eth_v1.clone().clone(), + chain_filter.clone(), + task_spawner_filter.clone(), + ); + // POST lighthouse/finalize let post_lighthouse_finalize = warp::path("lighthouse") .and(warp::path("finalize")) @@ -4233,7 +2614,10 @@ pub fn serve( ); network_globals.add_trusted_peer(enr.clone()); - publish_network_message(&network_tx, NetworkMessage::ConnectTrustedPeer(enr))?; + utils::publish_network_message( + &network_tx, + NetworkMessage::ConnectTrustedPeer(enr), + )?; Ok(()) }) @@ -4264,7 +2648,7 @@ pub fn serve( ); network_globals.remove_trusted_peer(enr.clone()); - publish_network_message( + utils::publish_network_message( &network_tx, NetworkMessage::DisconnectTrustedPeer(enr), )?; @@ -4716,6 +3100,7 @@ pub fn serve( ); let get_events = eth_v1 + .clone() .and(warp::path("events")) .and(warp::path::end()) .and(multi_key_query::()) @@ -5037,70 +3422,3 @@ pub fn serve( Ok(http_server) } - -fn from_meta_data( - meta_data: &RwLock>, - spec: 
&ChainSpec, -) -> api_types::MetaData { - let meta_data = meta_data.read(); - let format_hex = |bytes: &[u8]| format!("0x{}", hex::encode(bytes)); - - let seq_number = *meta_data.seq_number(); - let attnets = format_hex(&meta_data.attnets().clone().into_bytes()); - let syncnets = format_hex( - &meta_data - .syncnets() - .cloned() - .unwrap_or_default() - .into_bytes(), - ); - - if spec.is_peer_das_scheduled() { - api_types::MetaData::V3(api_types::MetaDataV3 { - seq_number, - attnets, - syncnets, - custody_group_count: meta_data.custody_group_count().cloned().unwrap_or_default(), - }) - } else { - api_types::MetaData::V2(api_types::MetaDataV2 { - seq_number, - attnets, - syncnets, - }) - } -} - -/// Publish a message to the libp2p pubsub network. -fn publish_pubsub_message( - network_tx: &UnboundedSender>, - message: PubsubMessage, -) -> Result<(), warp::Rejection> { - publish_network_message( - network_tx, - NetworkMessage::Publish { - messages: vec![message], - }, - ) -} - -/// Publish a message to the libp2p pubsub network. -fn publish_pubsub_messages( - network_tx: &UnboundedSender>, - messages: Vec>, -) -> Result<(), warp::Rejection> { - publish_network_message(network_tx, NetworkMessage::Publish { messages }) -} - -/// Publish a message to the libp2p network. 
-fn publish_network_message( - network_tx: &UnboundedSender>, - message: NetworkMessage, -) -> Result<(), warp::Rejection> { - network_tx.send(message).map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "unable to publish to network channel: {}", - e - )) - }) -} diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index ca9b86990c3..86eef03218b 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -3,13 +3,14 @@ use crate::version::{ beacon_response, }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use eth2::beacon_response::BeaconResponse; use eth2::types::{ self as api_types, LightClientUpdate, LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; use std::sync::Arc; -use types::{BeaconResponse, EthSpec, ForkName, Hash256, LightClientBootstrap}; +use types::{EthSpec, ForkName, Hash256, LightClientBootstrap}; use warp::{ Rejection, hyper::{Body, Response}, diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 367e09969b4..3bd0cec7e33 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -6,9 +6,11 @@ use crate::{ add_ssz_content_type_header, beacon_response, inconsistent_fork_rejection, }, }; +use beacon_chain::graffiti_calculator::GraffitiSettings; use beacon_chain::{ BeaconBlockResponseWrapper, BeaconChain, BeaconChainTypes, ProduceBlockVerification, }; +use eth2::beacon_response::ForkVersionedResponse; use eth2::types::{self as api_types, ProduceBlockV3Metadata, SkipRandaoVerification}; use lighthouse_tracing::{SPAN_PRODUCE_BLOCK_V2, SPAN_PRODUCE_BLOCK_V3}; use ssz::Encode; @@ -67,11 +69,13 @@ pub async fn produce_block_v3( query.builder_boost_factor }; + let graffiti_settings = GraffitiSettings::new(query.graffiti, query.graffiti_policy); + let block_response_type = 
chain .produce_block_with_verification( randao_reveal, slot, - query.graffiti, + graffiti_settings, randao_verification, builder_boost_factor, BlockProductionVersion::V3, @@ -147,11 +151,13 @@ pub async fn produce_blinded_block_v2( })?; let randao_verification = get_randao_verification(&query, randao_reveal.is_infinity())?; + let graffiti_settings = GraffitiSettings::new(query.graffiti, query.graffiti_policy); + let block_response_type = chain .produce_block_with_verification( randao_reveal, slot, - query.graffiti, + graffiti_settings, randao_verification, None, BlockProductionVersion::BlindedV2, @@ -181,12 +187,13 @@ pub async fn produce_block_v2( })?; let randao_verification = get_randao_verification(&query, randao_reveal.is_infinity())?; + let graffiti_settings = GraffitiSettings::new(query.graffiti, query.graffiti_policy); let block_response_type = chain .produce_block_with_verification( randao_reveal, slot, - query.graffiti, + graffiti_settings, randao_verification, None, BlockProductionVersion::FullV2, diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 05a4a4b7a4a..b54c071eb80 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -9,9 +9,12 @@ use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainError, BeaconChainTypes, BlockError, IntoGossipVerifiedBlock, NotifyExecutionLayer, build_blob_data_column_sidecars, }; -use eth2::types::{ - BlobsBundle, BroadcastValidation, ErrorMessage, ExecutionPayloadAndBlobs, FullPayloadContents, - PublishBlockRequest, SignedBlockContents, +use eth2::{ + StatusCode, + types::{ + BlobsBundle, BroadcastValidation, ErrorMessage, ExecutionPayloadAndBlobs, + FullPayloadContents, PublishBlockRequest, SignedBlockContents, + }, }; use execution_layer::{ProvenancedPayload, SubmitBlindedBlockResponse}; use futures::TryFutureExt; @@ -25,14 +28,13 @@ use std::sync::Arc; use std::sync::atomic::{AtomicBool, 
Ordering}; use std::time::Duration; use tokio::sync::mpsc::UnboundedSender; -use tracing::{Span, debug, debug_span, error, info, instrument, warn}; +use tracing::{Span, debug, debug_span, error, field, info, instrument, warn}; use tree_hash::TreeHash; use types::{ AbstractExecPayload, BeaconBlockRef, BlobSidecar, BlobsList, BlockImportSource, DataColumnSubnetId, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, FullPayload, FullPayloadBellatrix, Hash256, KzgProofs, SignedBeaconBlock, SignedBlindedBeaconBlock, }; -use warp::http::StatusCode; use warp::{Rejection, Reply, reply::Response}; pub type UnverifiedBlobs = Option<( @@ -80,7 +82,7 @@ impl ProvenancedBlock> name = SPAN_PUBLISH_BLOCK, level = "info", skip_all, - fields(?block_root, ?validation_level, provenance = tracing::field::Empty) + fields(block_root = field::Empty, ?validation_level, block_slot = field::Empty, provenance = field::Empty) )] pub async fn publish_block>( block_root: Option, @@ -103,12 +105,16 @@ pub async fn publish_block>( } else { "builder" }; - let current_span = Span::current(); - current_span.record("provenance", provenance); let block = unverified_block.inner_block(); + let block_root = block_root.unwrap_or_else(|| block.canonical_root()); + + let current_span = Span::current(); + current_span.record("provenance", provenance); + current_span.record("block_root", field::display(block_root)); + current_span.record("block_slot", field::display(block.slot())); - debug!(slot = %block.slot(), "Signed block received in HTTP API"); + debug!("Signed block received in HTTP API"); /* actually publish a block */ let publish_block_p2p = move |block: Arc>, @@ -132,9 +138,10 @@ pub async fn publish_block>( "Signed block published to network via HTTP API" ); - crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone())).map_err( - |_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)), - )?; + crate::utils::publish_pubsub_message(&sender, 
PubsubMessage::BeaconBlock(block.clone())) + .map_err(|_| { + BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish)) + })?; Ok(()) }; @@ -152,12 +159,6 @@ pub async fn publish_block>( // Gossip verify the block and blobs/data columns separately. let gossip_verified_block_result = unverified_block.into_gossip_verified_block(&chain); - let block_root = block_root.unwrap_or_else(|| { - gossip_verified_block_result.as_ref().map_or_else( - |_| block.canonical_root(), - |verified_block| verified_block.block_root, - ) - }); let should_publish_block = gossip_verified_block_result.is_ok(); if BroadcastValidation::Gossip == validation_level && should_publish_block { @@ -304,14 +305,13 @@ pub async fn publish_block>( message: "duplicate block".to_string(), stacktraces: vec![], }), - duplicate_status_code, + warp_utils::status_code::convert(duplicate_status_code)?, ) .into_response()) } } - Err(BlockError::DuplicateImportStatusUnknown(root)) => { + Err(BlockError::DuplicateImportStatusUnknown(_)) => { debug!( - block_root = ?root, slot = %block.slot(), "Block previously seen" ); @@ -493,7 +493,7 @@ fn publish_blob_sidecars( blob: &GossipVerifiedBlob, ) -> Result<(), BlockError> { let pubsub_message = PubsubMessage::BlobSidecar(Box::new((blob.index(), blob.clone_blob()))); - crate::publish_pubsub_message(sender_clone, pubsub_message) + crate::utils::publish_pubsub_message(sender_clone, pubsub_message) .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) } @@ -526,7 +526,7 @@ fn publish_column_sidecars( PubsubMessage::DataColumnSidecar(Box::new((subnet, data_col))) }) .collect::>(); - crate::publish_pubsub_messages(sender_clone, pubsub_messages) + crate::utils::publish_pubsub_messages(sender_clone, pubsub_messages) .map_err(|_| BlockError::BeaconChainError(Box::new(BeaconChainError::UnableToPublish))) } diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 
edda0e60a61..b9fa24ad6a4 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -1,6 +1,6 @@ //! Handlers for sync committee endpoints. -use crate::publish_pubsub_message; +use crate::utils::publish_pubsub_message; use beacon_chain::sync_committee_verification::{ Error as SyncVerificationError, VerifiedSyncCommitteeMessage, }; diff --git a/beacon_node/http_api/src/utils.rs b/beacon_node/http_api/src/utils.rs new file mode 100644 index 00000000000..f2b859ebe59 --- /dev/null +++ b/beacon_node/http_api/src/utils.rs @@ -0,0 +1,90 @@ +use crate::task_spawner::TaskSpawner; +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::EndpointVersion; +use lighthouse_network::PubsubMessage; +use lighthouse_network::rpc::methods::MetaData; +use network::{NetworkMessage, ValidatorSubscriptionMessage}; +use parking_lot::RwLock; +use std::sync::Arc; +use tokio::sync::mpsc::{Sender, UnboundedSender}; +use types::{ChainSpec, EthSpec, ForkName}; +use warp::Rejection; +use warp::filters::BoxedFilter; + +pub type ResponseFilter = BoxedFilter<(warp::reply::Response,)>; +pub type AnyVersionFilter = BoxedFilter<(EndpointVersion,)>; +pub type EthV1Filter = BoxedFilter<()>; +pub type ChainFilter = BoxedFilter<(Arc>,)>; +pub type NotWhileSyncingFilter = BoxedFilter<(Result<(), Rejection>,)>; +pub type TaskSpawnerFilter = BoxedFilter<(TaskSpawner<::EthSpec>,)>; +pub type ValidatorSubscriptionTxFilter = BoxedFilter<(Sender,)>; +pub type NetworkTxFilter = + BoxedFilter<(UnboundedSender::EthSpec>>,)>; +pub type OptionalConsensusVersionHeaderFilter = BoxedFilter<(Option,)>; + +pub fn from_meta_data( + meta_data: &RwLock>, + spec: &ChainSpec, +) -> eth2::types::MetaData { + let meta_data = meta_data.read(); + let format_hex = |bytes: &[u8]| format!("0x{}", hex::encode(bytes)); + + let seq_number = *meta_data.seq_number(); + let attnets = format_hex(&meta_data.attnets().clone().into_bytes()); + let syncnets = format_hex( + 
&meta_data + .syncnets() + .cloned() + .unwrap_or_default() + .into_bytes(), + ); + + if spec.is_peer_das_scheduled() { + eth2::types::MetaData::V3(eth2::types::MetaDataV3 { + seq_number, + attnets, + syncnets, + custody_group_count: meta_data.custody_group_count().cloned().unwrap_or_default(), + }) + } else { + eth2::types::MetaData::V2(eth2::types::MetaDataV2 { + seq_number, + attnets, + syncnets, + }) + } +} + +/// Publish a message to the libp2p pubsub network. +pub fn publish_pubsub_message( + network_tx: &UnboundedSender>, + message: PubsubMessage, +) -> Result<(), warp::Rejection> { + publish_network_message( + network_tx, + NetworkMessage::Publish { + messages: vec![message], + }, + ) +} + +/// Publish a message to the libp2p pubsub network. +pub fn publish_pubsub_messages( + network_tx: &UnboundedSender>, + messages: Vec>, +) -> Result<(), warp::Rejection> { + publish_network_message(network_tx, NetworkMessage::Publish { messages }) +} + +/// Publish a message to the libp2p network. +pub fn publish_network_message( + network_tx: &UnboundedSender>, + message: NetworkMessage, +) -> Result<(), warp::Rejection> { + network_tx.send(message).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "unable to publish to network channel: {}", + e + )) + }) +} diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs deleted file mode 100644 index 25b0feb99e8..00000000000 --- a/beacon_node/http_api/src/validator.rs +++ /dev/null @@ -1,22 +0,0 @@ -use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; -use types::{BeaconState, PublicKeyBytes}; - -/// Uses the `chain.validator_pubkey_cache` to resolve a pubkey to a validator -/// index and then ensures that the validator exists in the given `state`. -pub fn pubkey_to_validator_index( - chain: &BeaconChain, - state: &BeaconState, - pubkey: &PublicKeyBytes, -) -> Result, Box> { - chain - .validator_index(pubkey) - .map_err(Box::new)? 
- .filter(|&index| { - state - .validators() - .get(index) - .is_some_and(|v| v.pubkey == *pubkey) - }) - .map(Result::Ok) - .transpose() -} diff --git a/beacon_node/http_api/src/validator/mod.rs b/beacon_node/http_api/src/validator/mod.rs new file mode 100644 index 00000000000..8baf7c52458 --- /dev/null +++ b/beacon_node/http_api/src/validator/mod.rs @@ -0,0 +1,972 @@ +use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; +use crate::task_spawner::{Priority, TaskSpawner}; +use crate::utils::{ + AnyVersionFilter, ChainFilter, EthV1Filter, NetworkTxFilter, NotWhileSyncingFilter, + ResponseFilter, TaskSpawnerFilter, ValidatorSubscriptionTxFilter, publish_network_message, +}; +use crate::version::V3; +use crate::{StateId, attester_duties, proposer_duties, sync_committees}; +use beacon_chain::attestation_verification::VerifiedAttestation; +use beacon_chain::validator_monitor::timestamp_now; +use beacon_chain::{AttestationError, BeaconChain, BeaconChainError, BeaconChainTypes}; +use bls::PublicKeyBytes; +use eth2::StatusCode; +use eth2::types::{ + Accept, BeaconCommitteeSubscription, EndpointVersion, Failure, GenericResponse, + StandardLivenessResponseData, StateId as CoreStateId, ValidatorAggregateAttestationQuery, + ValidatorAttestationDataQuery, ValidatorBlocksQuery, ValidatorIndexData, ValidatorStatus, +}; +use lighthouse_network::PubsubMessage; +use network::{NetworkMessage, ValidatorSubscriptionMessage}; +use slot_clock::SlotClock; +use std::sync::Arc; +use tokio::sync::mpsc::{Sender, UnboundedSender}; +use tokio::sync::oneshot; +use tracing::{debug, error, info, warn}; +use types::{ + BeaconState, Epoch, EthSpec, ProposerPreparationData, SignedAggregateAndProof, + SignedContributionAndProof, SignedValidatorRegistrationData, Slot, SyncContributionData, + ValidatorSubscription, +}; +use warp::{Filter, Rejection, Reply}; +use warp_utils::reject::convert_rejection; + +/// Uses the `chain.validator_pubkey_cache` to resolve a pubkey 
to a validator +/// index and then ensures that the validator exists in the given `state`. +pub fn pubkey_to_validator_index( + chain: &BeaconChain, + state: &BeaconState, + pubkey: &PublicKeyBytes, +) -> Result, Box> { + chain + .validator_index(pubkey) + .map_err(Box::new)? + .filter(|&index| { + state + .validators() + .get(index) + .is_some_and(|v| v.pubkey == *pubkey) + }) + .map(Result::Ok) + .transpose() +} + +// GET validator/sync_committee_contribution +pub fn get_validator_sync_committee_contribution( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("sync_committee_contribution")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |sync_committee_data: SyncContributionData, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + chain + .get_aggregated_sync_committee_contribution(&sync_committee_data) + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "unable to fetch sync contribution: {:?}", + e + )) + })? 
+ .map(GenericResponse::from) + .ok_or_else(|| { + warp_utils::reject::custom_not_found( + "no matching sync contribution found".to_string(), + ) + }) + }) + }, + ) + .boxed() +} + +// POST validator/duties/sync/{epoch} +pub fn post_validator_duties_sync( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("sync")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + sync_committees::sync_committee_duties(epoch, &indices.0, &chain) + }) + }, + ) + .boxed() +} + +// POST validator/duties/attester/{epoch} +pub fn post_validator_duties_attester( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("duties")) + .and(warp::path("attester")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, 
move || { + not_synced_filter?; + attester_duties::attester_duties(epoch, &indices.0, &chain) + }) + }, + ) + .boxed() +} + +// GET validator/aggregate_attestation?attestation_data_root,slot +pub fn get_validator_aggregate_attestation( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("aggregate_attestation")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |endpoint_version: EndpointVersion, + query: ValidatorAggregateAttestationQuery, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_response_task(Priority::P0, move || { + not_synced_filter?; + crate::aggregate_attestation::get_aggregate_attestation( + query.slot, + &query.attestation_data_root, + query.committee_index, + endpoint_version, + chain, + ) + }) + }, + ) + .boxed() +} + +// GET validator/attestation_data?slot,committee_index +pub fn get_validator_attestation_data( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("attestation_data")) + .and(warp::path::end()) + .and(warp::query::()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |query: ValidatorAttestationDataQuery, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + + let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; + + // allow a tolerance of one slot to account for clock skew + if 
query.slot > current_slot + 1 { + return Err(warp_utils::reject::custom_bad_request(format!( + "request slot {} is more than one slot past the current slot {}", + query.slot, current_slot + ))); + } + + chain + .produce_unaggregated_attestation(query.slot, query.committee_index) + .map(|attestation| attestation.data().clone()) + .map(GenericResponse::from) + .map_err(warp_utils::reject::unhandled_error) + }) + }, + ) + .boxed() +} + +// GET validator/blinded_blocks/{slot} +pub fn get_validator_blinded_blocks( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("blinded_blocks")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(warp::query::()) + .and(warp::header::optional::("accept")) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |slot: Slot, + not_synced_filter: Result<(), Rejection>, + query: ValidatorBlocksQuery, + accept_header: Option, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + not_synced_filter?; + produce_blinded_block_v2(accept_header, chain, slot, query).await + }) + }, + ) + .boxed() +} + +// GET validator/blocks/{slot} +pub fn get_validator_blocks( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("blocks")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid slot".to_string(), + )) + })) + .and(warp::path::end()) + .and(warp::header::optional::("accept")) + 
.and(not_while_syncing_filter) + .and(warp::query::()) + .and(task_spawner_filter) + .and(chain_filter) + .then( + |endpoint_version: EndpointVersion, + slot: Slot, + accept_header: Option, + not_synced_filter: Result<(), Rejection>, + query: ValidatorBlocksQuery, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + debug!(?slot, "Block production request from HTTP API"); + + not_synced_filter?; + + if endpoint_version == V3 { + produce_block_v3(accept_header, chain, slot, query).await + } else { + produce_block_v2(accept_header, chain, slot, query).await + } + }) + }, + ) + .boxed() +} + +// POST validator/liveness/{epoch} +pub fn post_validator_liveness_epoch( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("liveness")) + .and(warp::path::param::()) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |epoch: Epoch, + indices: ValidatorIndexData, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + // Ensure the request is for either the current, previous or next epoch. 
+ let current_epoch = + chain.epoch().map_err(warp_utils::reject::unhandled_error)?; + let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); + let next_epoch = current_epoch.saturating_add(Epoch::new(1)); + + if epoch < prev_epoch || epoch > next_epoch { + return Err(warp_utils::reject::custom_bad_request(format!( + "request epoch {} is more than one epoch from the current epoch {}", + epoch, current_epoch + ))); + } + + let liveness: Vec = indices + .0 + .iter() + .cloned() + .map(|index| { + let is_live = chain.validator_seen_at_epoch(index as usize, epoch); + StandardLivenessResponseData { index, is_live } + }) + .collect(); + + Ok(GenericResponse::from(liveness)) + }) + }, + ) + .boxed() +} + +// POST validator/sync_committee_subscriptions +pub fn post_validator_sync_committee_subscriptions( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + validator_subscription_tx_filter: ValidatorSubscriptionTxFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("sync_committee_subscriptions")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(validator_subscription_tx_filter) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |subscriptions: Vec, + validator_subscription_tx: Sender, + task_spawner: TaskSpawner, + chain: Arc>, + | { + task_spawner.blocking_json_task(Priority::P0, move || { + for subscription in subscriptions { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + + let message = ValidatorSubscriptionMessage::SyncCommitteeSubscribe { + subscriptions: vec![subscription], + }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + info = "the host may be overloaded or resource-constrained", + error = ?e, + "Unable to process sync subscriptions" + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or 
shutting down".to_string(), + )); + } + } + + Ok(()) + }) + }, + ).boxed() +} + +// POST validator/register_validator +pub fn post_validator_register_validator( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("register_validator")) + .and(warp::path::end()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .then( + |task_spawner: TaskSpawner, + chain: Arc>, + register_val_data: Vec| async { + let (tx, rx) = oneshot::channel(); + + let initial_result = task_spawner + .spawn_async_with_rejection_no_conversion(Priority::P0, async move { + let execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)?; + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + debug!( + count = register_val_data.len(), + "Received register validator request" + ); + + let head_snapshot = chain.head_snapshot(); + let spec = &chain.spec; + + let (preparation_data, filtered_registration_data): ( + Vec<(ProposerPreparationData, Option)>, + Vec, + ) = register_val_data + .into_iter() + .filter_map(|register_data| { + chain + .validator_index(®ister_data.message.pubkey) + .ok() + .flatten() + .and_then(|validator_index| { + let validator = head_snapshot + .beacon_state + .get_validator(validator_index) + .ok()?; + let validator_status = ValidatorStatus::from_validator( + validator, + current_epoch, + spec.far_future_epoch, + ) + .superstatus(); + let is_active_or_pending = + matches!(validator_status, ValidatorStatus::Pending) + || matches!( + validator_status, + ValidatorStatus::Active + ); + + // Filter out validators who are not 'active' or 'pending'. 
+ is_active_or_pending.then_some({ + ( + ( + ProposerPreparationData { + validator_index: validator_index as u64, + fee_recipient: register_data + .message + .fee_recipient, + }, + Some(register_data.message.gas_limit), + ), + register_data, + ) + }) + }) + }) + .unzip(); + + // Update the prepare beacon proposer cache based on this request. + execution_layer + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|(data, limit)| (data, limit)), + ) + .await; + + // Call prepare beacon proposer blocking with the latest update in order to make + // sure we have a local payload to fall back to in the event of the blinded block + // flow failing. + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + info!( + count = filtered_registration_data.len(), + "Forwarding register validator request to connected builder" + ); + + // It's a waste of a `BeaconProcessor` worker to just + // wait on a response from the builder (especially since + // they have frequent timeouts). Spawn a new task and + // send the response back to our original HTTP request + // task via a channel. + let builder_future = async move { + let arc_builder = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)? + .builder(); + let builder = arc_builder + .as_ref() + .ok_or(BeaconChainError::BuilderMissing) + .map_err(warp_utils::reject::unhandled_error)?; + builder + .post_builder_validators(&filtered_registration_data) + .await + .map(|resp| warp::reply::json(&resp).into_response()) + .map_err(|e| { + warn!( + num_registrations = filtered_registration_data.len(), + error = ?e, + "Relay error when registering validator(s)" + ); + // Forward the HTTP status code if we are able to, otherwise fall back + // to a server error. 
+ if let eth2::Error::ServerMessage(message) = e { + if message.code == StatusCode::BAD_REQUEST.as_u16() { + return warp_utils::reject::custom_bad_request( + message.message, + ); + } else { + // According to the spec this response should only be a 400 or 500, + // so we fall back to a 500 here. + return warp_utils::reject::custom_server_error( + message.message, + ); + } + } + warp_utils::reject::custom_server_error(format!("{e:?}")) + }) + }; + tokio::task::spawn(async move { tx.send(builder_future.await) }); + + // Just send a generic 200 OK from this closure. We'll + // ignore the `Ok` variant and form a proper response + // from what is sent back down the channel. + Ok(warp::reply::reply().into_response()) + }) + .await; + + if initial_result.is_err() { + return convert_rejection(initial_result).await; + } + + // Await a response from the builder without blocking a + // `BeaconProcessor` worker. + convert_rejection(rx.await.unwrap_or_else(|_| { + Ok(warp::reply::with_status( + warp::reply::json(&"No response from channel"), + warp::http::StatusCode::INTERNAL_SERVER_ERROR, + ) + .into_response()) + })) + .await + }, + ) + .boxed() +} + +// POST validator/prepare_beacon_proposer +pub fn post_validator_prepare_beacon_proposer( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("prepare_beacon_proposer")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(network_tx_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .then( + |not_synced_filter: Result<(), Rejection>, + network_tx: UnboundedSender>, + task_spawner: TaskSpawner, + chain: Arc>, + preparation_data: Vec| { + task_spawner.spawn_async_with_rejection(Priority::P0, async move { + not_synced_filter?; + let 
execution_layer = chain + .execution_layer + .as_ref() + .ok_or(BeaconChainError::ExecutionLayerMissing) + .map_err(warp_utils::reject::unhandled_error)?; + + let current_slot = chain + .slot_clock + .now_or_genesis() + .ok_or(BeaconChainError::UnableToReadSlot) + .map_err(warp_utils::reject::unhandled_error)?; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + debug!( + count = preparation_data.len(), + "Received proposer preparation data" + ); + + execution_layer + .update_proposer_preparation( + current_epoch, + preparation_data.iter().map(|data| (data, &None)), + ) + .await; + + chain + .prepare_beacon_proposer(current_slot) + .await + .map_err(|e| { + warp_utils::reject::custom_bad_request(format!( + "error updating proposer preparations: {:?}", + e + )) + })?; + + if chain.spec.is_peer_das_scheduled() { + let (finalized_beacon_state, _, _) = + StateId(CoreStateId::Finalized).state(&chain)?; + let validators_and_balances = preparation_data + .iter() + .filter_map(|preparation| { + if let Ok(effective_balance) = finalized_beacon_state + .get_effective_balance(preparation.validator_index as usize) + { + Some((preparation.validator_index as usize, effective_balance)) + } else { + None + } + }) + .collect::>(); + + let current_slot = + chain.slot().map_err(warp_utils::reject::unhandled_error)?; + if let Some(cgc_change) = chain + .data_availability_checker + .custody_context() + .register_validators(validators_and_balances, current_slot, &chain.spec) + { + chain.update_data_column_custody_info(Some( + cgc_change + .effective_epoch + .start_slot(T::EthSpec::slots_per_epoch()), + )); + + network_tx.send(NetworkMessage::CustodyCountChanged { + new_custody_group_count: cgc_change.new_custody_group_count, + sampling_count: cgc_change.sampling_count, + }).unwrap_or_else(|e| { + debug!(error = %e, "Could not send message to the network service. 
\ + Likely shutdown") + }); + } + } + + Ok::<_, warp::reject::Rejection>(warp::reply::json(&()).into_response()) + }) + }, + ) + .boxed() +} + +// POST validator/beacon_committee_subscriptions +pub fn post_validator_beacon_committee_subscriptions( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + validator_subscription_tx_filter: ValidatorSubscriptionTxFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("beacon_committee_subscriptions")) + .and(warp::path::end()) + .and(warp_utils::json::json()) + .and(validator_subscription_tx_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .then( + |committee_subscriptions: Vec, + validator_subscription_tx: Sender, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + let subscriptions: std::collections::BTreeSet<_> = committee_subscriptions + .iter() + .map(|subscription| { + chain + .validator_monitor + .write() + .auto_register_local_validator(subscription.validator_index); + ValidatorSubscription { + attestation_committee_index: subscription.committee_index, + slot: subscription.slot, + committee_count_at_slot: subscription.committees_at_slot, + is_aggregator: subscription.is_aggregator, + } + }) + .collect(); + + let message = + ValidatorSubscriptionMessage::AttestationSubscribe { subscriptions }; + if let Err(e) = validator_subscription_tx.try_send(message) { + warn!( + info = "the host may be overloaded or resource-constrained", + error = ?e, + "Unable to process committee subscriptions" + ); + return Err(warp_utils::reject::custom_server_error( + "unable to queue subscription, host may be overloaded or shutting down" + .to_string(), + )); + } + Ok(()) + }) + }, + ) + .boxed() +} + +pub fn post_validator_contribution_and_proofs( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: 
NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + .and(warp::path("contribution_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + |not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>, + contributions: Vec>, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + sync_committees::process_signed_contribution_and_proofs( + contributions, + network_tx, + &chain, + )?; + Ok(GenericResponse::from(())) + }) + }, + ) + .boxed() +} + +// POST validator/aggregate_and_proofs +pub fn post_validator_aggregate_and_proofs( + any_version: AnyVersionFilter, + chain_filter: ChainFilter, + network_tx_filter: NetworkTxFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + any_version + .and(warp::path("validator")) + .and(warp::path("aggregate_and_proofs")) + .and(warp::path::end()) + .and(not_while_syncing_filter.clone()) + .and(task_spawner_filter.clone()) + .and(chain_filter.clone()) + .and(warp_utils::json::json()) + .and(network_tx_filter.clone()) + .then( + // V1 and V2 are identical except V2 has a consensus version header in the request. + // We only require this header for SSZ deserialization, which isn't supported for + // this endpoint presently. 
+ |_endpoint_version: EndpointVersion, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>, + aggregates: Vec>, + network_tx: UnboundedSender>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + let seen_timestamp = timestamp_now(); + let mut verified_aggregates = Vec::with_capacity(aggregates.len()); + let mut messages = Vec::with_capacity(aggregates.len()); + let mut failures = Vec::new(); + + // Verify that all messages in the post are valid before processing further + for (index, aggregate) in aggregates.iter().enumerate() { + match chain.verify_aggregated_attestation_for_gossip(aggregate) { + Ok(verified_aggregate) => { + messages.push(PubsubMessage::AggregateAndProofAttestation(Box::new( + verified_aggregate.aggregate().clone(), + ))); + + // Notify the validator monitor. + chain + .validator_monitor + .read() + .register_api_aggregated_attestation( + seen_timestamp, + verified_aggregate.aggregate(), + verified_aggregate.indexed_attestation(), + &chain.slot_clock, + ); + + verified_aggregates.push((index, verified_aggregate)); + } + // If we already know the attestation, don't broadcast it or attempt to + // further verify it. Return success. + // + // It's reasonably likely that two different validators produce + // identical aggregates, especially if they're using the same beacon + // node. + Err(AttestationError::AttestationSupersetKnown(_)) => continue, + // If we've already seen this aggregator produce an aggregate, just + // skip this one. + // + // We're likely to see this with VCs that use fallback BNs. The first + // BN might time-out *after* publishing the aggregate and then the + // second BN will indicate it's already seen the aggregate. + // + // There's no actual error for the user or the network since the + // aggregate has been successfully published by some other node. 
+ Err(AttestationError::AggregatorAlreadyKnown(_)) => continue, + Err(e) => { + error!( + error = ?e, + request_index = index, + aggregator_index = aggregate.message().aggregator_index(), + attestation_index = aggregate.message().aggregate().committee_index(), + attestation_slot = %aggregate.message().aggregate().data().slot, + "Failure verifying aggregate and proofs" + ); + failures.push(Failure::new(index, format!("Verification: {:?}", e))); + } + } + } + + // Publish aggregate attestations to the libp2p network + if !messages.is_empty() { + publish_network_message(&network_tx, NetworkMessage::Publish { messages })?; + } + + // Import aggregate attestations + for (index, verified_aggregate) in verified_aggregates { + if let Err(e) = chain.apply_attestation_to_fork_choice(&verified_aggregate) { + error!( + error = ?e, + request_index = index, + aggregator_index = verified_aggregate.aggregate().message().aggregator_index(), + attestation_index = verified_aggregate.attestation().committee_index(), + attestation_slot = %verified_aggregate.attestation().data().slot, + "Failure applying verified aggregate attestation to fork choice" + ); + failures.push(Failure::new(index, format!("Fork choice: {:?}", e))); + } + if let Err(e) = chain.add_to_block_inclusion_pool(verified_aggregate) { + warn!( + error = ?e, + request_index = index, + "Could not add verified aggregate attestation to the inclusion pool" + ); + failures.push(Failure::new(index, format!("Op pool: {:?}", e))); + } + } + + if !failures.is_empty() { + Err(warp_utils::reject::indexed_bad_request("error processing aggregate and proofs".to_string(), + failures, + )) + } else { + Ok(()) + } + }) + }, + ).boxed() +} + +// GET validator/duties/proposer/{epoch} +pub fn get_validator_duties_proposer( + eth_v1: EthV1Filter, + chain_filter: ChainFilter, + not_while_syncing_filter: NotWhileSyncingFilter, + task_spawner_filter: TaskSpawnerFilter, +) -> ResponseFilter { + eth_v1 + .and(warp::path("validator")) + 
.and(warp::path("duties")) + .and(warp::path("proposer")) + .and(warp::path::param::().or_else(|_| async { + Err(warp_utils::reject::custom_bad_request( + "Invalid epoch".to_string(), + )) + })) + .and(warp::path::end()) + .and(not_while_syncing_filter) + .and(task_spawner_filter) + .and(chain_filter) + .then( + |epoch: Epoch, + not_synced_filter: Result<(), Rejection>, + task_spawner: TaskSpawner, + chain: Arc>| { + task_spawner.blocking_json_task(Priority::P0, move || { + not_synced_filter?; + proposer_duties::proposer_duties(epoch, &chain) + }) + }, + ) + .boxed() +} diff --git a/beacon_node/http_api/src/version.rs b/beacon_node/http_api/src/version.rs index 871a10e7d4a..371064c886b 100644 --- a/beacon_node/http_api/src/version.rs +++ b/beacon_node/http_api/src/version.rs @@ -1,16 +1,14 @@ use crate::api_types::EndpointVersion; +use eth2::beacon_response::{ + BeaconResponse, ExecutionOptimisticFinalizedBeaconResponse, + ExecutionOptimisticFinalizedMetadata, ForkVersionedResponse, UnversionedResponse, +}; use eth2::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, SSZ_CONTENT_TYPE_HEADER, }; use serde::Serialize; -use types::{ - BeaconResponse, ForkName, ForkVersionedResponse, InconsistentFork, Uint256, - UnversionedResponse, - beacon_response::{ - ExecutionOptimisticFinalizedBeaconResponse, ExecutionOptimisticFinalizedMetadata, - }, -}; +use types::{ForkName, InconsistentFork, Uint256}; use warp::reply::{self, Reply, Response}; pub const V1: EndpointVersion = EndpointVersion(1); diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 82723c2b405..357b78cf41c 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -6,13 +6,12 @@ use beacon_chain::{ }; use eth2::reqwest::{Response, StatusCode}; use 
eth2::types::{BroadcastValidation, PublishBlockRequest}; +use fixed_bytes::FixedBytesExtended; use http_api::test_utils::InteractiveTester; use http_api::{Config, ProvenancedBlock, publish_blinded_block, publish_block, reconstruct_block}; use std::collections::HashSet; use std::sync::Arc; -use types::{ - ColumnIndex, Epoch, EthSpec, FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, Slot, -}; +use types::{ColumnIndex, Epoch, EthSpec, ForkName, Hash256, MainnetEthSpec, Slot}; use warp::Rejection; use warp_utils::reject::CustomBadRequest; @@ -823,6 +822,14 @@ pub async fn blinded_gossip_invalid() { tester.harness.advance_slot(); + // Ensure there's at least one blob in the block, so we don't run into failures when the + // block generator logic changes, as different errors could be returned: + // * Invalidity of blocks: `NotFinalizedDescendant` + // * Invalidity of blobs: `ParentUnknown` + tester + .harness + .execution_block_generator() + .set_min_blob_count(1); let (blinded_block, _) = tester .harness .make_blinded_block_with_modifier(chain_state_before, slot, |b| { @@ -838,21 +845,20 @@ pub async fn blinded_gossip_invalid() { assert!(response.is_err()); let error_response: eth2::Error = response.err().unwrap(); + assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); + let pre_finalized_block_root = Hash256::zero(); - /* mandated by Beacon API spec */ - if tester.harness.spec.is_fulu_scheduled() { - // XXX: this should be a 400 but is a 500 due to the mock-builder being janky - assert_eq!( - error_response.status(), - Some(StatusCode::INTERNAL_SERVER_ERROR) - ); + let expected_error_msg = if tester.harness.spec.is_fulu_scheduled() { + format!( + "BAD_REQUEST: NotFinalizedDescendant {{ block_parent_root: {pre_finalized_block_root:?} }}" + ) } else { - assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error( - error_response, - format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} 
}}"), - ); - } + // Since Deneb, the invalidity of the blobs will be detected prior to the invalidity of the + // block. + format!("BAD_REQUEST: ParentUnknown {{ parent_root: {pre_finalized_block_root:?} }}") + }; + + assert_server_message_error(error_response, expected_error_msg); } /// Process a blinded block that is invalid, but valid on gossip. @@ -1648,6 +1654,10 @@ pub async fn block_seen_on_gossip_with_some_blobs_or_columns() { ) .await; tester.harness.advance_slot(); + tester + .harness + .execution_block_generator() + .set_min_blob_count(2); let slot_a = Slot::new(num_initial); let slot_b = slot_a + 1; diff --git a/beacon_node/http_api/tests/fork_tests.rs b/beacon_node/http_api/tests/fork_tests.rs index 50cf866b6a8..b96c8bd1122 100644 --- a/beacon_node/http_api/tests/fork_tests.rs +++ b/beacon_node/http_api/tests/fork_tests.rs @@ -4,13 +4,15 @@ use beacon_chain::{ StateSkipConfig, test_utils::{DEFAULT_ETH1_BLOCK_HASH, HARNESS_GENESIS_TIME, RelativeSyncCommittee}, }; +use bls::PublicKey; use eth2::types::{IndexedErrorMessage, StateId, SyncSubcommittee}; use execution_layer::test_utils::generate_genesis_header; +use fixed_bytes::FixedBytesExtended; use genesis::{InteropGenesisBuilder, bls_withdrawal_credentials}; use http_api::test_utils::*; use std::collections::HashSet; use types::{ - Address, ChainSpec, Epoch, EthSpec, FixedBytesExtended, Hash256, MinimalEthSpec, Slot, + Address, ChainSpec, Epoch, EthSpec, Hash256, MinimalEthSpec, Slot, test_utils::{generate_deterministic_keypair, generate_deterministic_keypairs}, }; @@ -392,7 +394,7 @@ async fn bls_to_execution_changes_update_all_around_capella_fork() { fn withdrawal_credentials_fn<'a>( index: usize, - _: &'a types::PublicKey, + _: &'a PublicKey, spec: &'a ChainSpec, ) -> Hash256 { // It is a bit inefficient to regenerate the whole keypair here, but this is a workaround. 
diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index 83cb70a7a3a..b04c812773a 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -11,6 +11,7 @@ use beacon_processor::{Work, WorkEvent, work_reprocessing_queue::ReprocessQueueM use eth2::types::ProduceBlockV3Response; use eth2::types::{DepositContractData, StateId}; use execution_layer::{ForkchoiceState, PayloadAttributes}; +use fixed_bytes::FixedBytesExtended; use http_api::test_utils::InteractiveTester; use parking_lot::Mutex; use slot_clock::SlotClock; @@ -21,8 +22,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Duration; use types::{ - Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, FixedBytesExtended, ForkName, - Hash256, MainnetEthSpec, MinimalEthSpec, ProposerPreparationData, Slot, Uint256, + Address, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, ForkName, Hash256, MainnetEthSpec, + MinimalEthSpec, ProposerPreparationData, Slot, Uint256, }; type E = MainnetEthSpec; @@ -60,7 +61,10 @@ async fn state_by_root_pruned_from_fork_choice() { type E = MinimalEthSpec; let validator_count = 24; - let spec = ForkName::latest().make_genesis_spec(E::default_spec()); + // TODO(EIP-7732): extend test for Gloas by reverting back to using `ForkName::latest()` + // Issue is that this test does block production via `extend_chain_with_sync` which expects to be able to use `state.latest_execution_payload_header` during block production, but Gloas uses `latest_execution_bid` instead + // This will be resolved in a subsequent block processing PR + let spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); let tester = InteractiveTester::::new_with_initializer_and_mutator( Some(spec.clone()), @@ -400,7 +404,10 @@ pub async fn proposer_boost_re_org_test( assert!(head_slot > 0); // Test using the latest fork so that we simulate conditions as similar to mainnet as 
possible. - let mut spec = ForkName::latest().make_genesis_spec(E::default_spec()); + // TODO(EIP-7732): extend test for Gloas by reverting back to using `ForkName::latest()` + // Issue is that `get_validator_blocks_v3` below expects to be able to use `state.latest_execution_payload_header` during `produce_block_on_state` -> `produce_partial_beacon_block` -> `get_execution_payload`, but gloas will no longer support this state field + // This will be resolved in a subsequent block processing PR + let mut spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); spec.terminal_total_difficulty = Uint256::from(1); // Ensure there are enough validators to have `attesters_per_slot`. @@ -639,7 +646,7 @@ pub async fn proposer_boost_re_org_test( .into(); let (unsigned_block_type, _) = tester .client - .get_validator_blocks_v3::(slot_c, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot_c, &randao_reveal, None, None, None) .await .unwrap(); diff --git a/beacon_node/http_api/tests/status_tests.rs b/beacon_node/http_api/tests/status_tests.rs index fd5e282c5bd..556b75cb85a 100644 --- a/beacon_node/http_api/tests/status_tests.rs +++ b/beacon_node/http_api/tests/status_tests.rs @@ -12,8 +12,10 @@ type E = MinimalEthSpec; /// Create a new test environment that is post-merge with `chain_depth` blocks. async fn post_merge_tester(chain_depth: u64, validator_count: u64) -> InteractiveTester { - // Test using latest fork so that we simulate conditions as similar to mainnet as possible. 
- let mut spec = ForkName::latest().make_genesis_spec(E::default_spec()); + // TODO(EIP-7732): extend tests for Gloas by reverting back to using `ForkName::latest()` + // Issue is that these tests do block production via `extend_chain_with_sync` which expects to be able to use `state.latest_execution_payload_header` during block production, but Gloas uses `latest_execution_bid` instead + // This will be resolved in a subsequent block processing PR + let mut spec = ForkName::Fulu.make_genesis_spec(E::default_spec()); spec.terminal_total_difficulty = Uint256::from(1); let tester = InteractiveTester::::new(Some(spec), validator_count as usize).await; diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index dc2fd4ae440..ed7abead18a 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -6,6 +6,7 @@ use beacon_chain::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec, }, }; +use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes}; use eth2::{ BeaconNodeHttpClient, Error, Error::ServerMessage, @@ -21,6 +22,7 @@ use execution_layer::test_utils::{ DEFAULT_BUILDER_PAYLOAD_VALUE_WEI, DEFAULT_GAS_LIMIT, DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI, MockBuilder, Operation, mock_builder_extra_data, mock_el_extra_data, }; +use fixed_bytes::FixedBytesExtended; use futures::FutureExt; use futures::stream::{Stream, StreamExt}; use http_api::{ @@ -34,6 +36,7 @@ use operation_pool::attestation_storage::CheckpointKey; use proto_array::ExecutionStatus; use sensitive_url::SensitiveUrl; use slot_clock::SlotClock; +use ssz::BitList; use state_processing::per_block_processing::get_expected_withdrawals; use state_processing::per_slot_processing; use state_processing::state_advance::partial_state_advance; @@ -43,9 +46,8 @@ use tokio::time::Duration; use tree_hash::TreeHash; use types::application_domain::ApplicationDomain; use types::{ - AggregateSignature, 
BitList, Domain, EthSpec, ExecutionBlockHash, Hash256, Keypair, - MainnetEthSpec, RelativeEpoch, SelectionProof, SignedRoot, SingleAttestation, Slot, - attestation::AttestationBase, + Domain, EthSpec, ExecutionBlockHash, Hash256, MainnetEthSpec, RelativeEpoch, SelectionProof, + SignedRoot, SingleAttestation, Slot, attestation::AttestationBase, }; type E = MainnetEthSpec; @@ -178,6 +180,9 @@ impl ApiTester { "precondition: current slot is one after head" ); + // Set a min blob count for the next block for get_blobs testing + harness.execution_block_generator().set_min_blob_count(2); + let (next_block, _next_state) = harness .make_block(head.beacon_state.clone(), harness.chain.slot().unwrap()) .await; @@ -1316,12 +1321,14 @@ impl ApiTester { .ok() .map(|(state, _execution_optimistic, _finalized)| state); - let result = self + let result = match self .client .get_beacon_states_pending_deposits(state_id.0) .await - .unwrap() - .map(|res| res.data); + { + Ok(response) => response, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; if result.is_none() && state_opt.is_none() { continue; @@ -1330,7 +1337,12 @@ impl ApiTester { let state = state_opt.as_mut().expect("result should be none"); let expected = state.pending_deposits().unwrap(); - assert_eq!(result.unwrap(), expected.to_vec()); + let response = result.unwrap(); + assert_eq!(response.data(), &expected.to_vec()); + + // Check that the version header is returned in the response + let fork_name = state.fork_name(&self.chain.spec).unwrap(); + assert_eq!(response.version(), Some(fork_name),); } self @@ -1343,12 +1355,14 @@ impl ApiTester { .ok() .map(|(state, _execution_optimistic, _finalized)| state); - let result = self + let result = match self .client .get_beacon_states_pending_partial_withdrawals(state_id.0) .await - .unwrap() - .map(|res| res.data); + { + Ok(response) => response, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; if result.is_none() && state_opt.is_none() { continue; @@ -1357,7 
+1371,12 @@ impl ApiTester { let state = state_opt.as_mut().expect("result should be none"); let expected = state.pending_partial_withdrawals().unwrap(); - assert_eq!(result.unwrap(), expected.to_vec()); + let response = result.unwrap(); + assert_eq!(response.data(), &expected.to_vec()); + + // Check that the version header is returned in the response + let fork_name = state.fork_name(&self.chain.spec).unwrap(); + assert_eq!(response.version(), Some(fork_name),); } self @@ -1855,7 +1874,7 @@ impl ApiTester { } pub async fn test_get_blob_sidecars(self, use_indices: bool) -> Self { - let block_id = BlockId(CoreBlockId::Finalized); + let block_id = BlockId(CoreBlockId::Head); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let num_blobs = block.num_expected_blobs(); @@ -1888,7 +1907,7 @@ impl ApiTester { } pub async fn test_get_blobs(self, versioned_hashes: bool) -> Self { - let block_id = BlockId(CoreBlockId::Finalized); + let block_id = BlockId(CoreBlockId::Head); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); let num_blobs = block.num_expected_blobs(); @@ -1926,7 +1945,7 @@ impl ApiTester { } pub async fn test_get_blobs_post_fulu_full_node(self, versioned_hashes: bool) -> Self { - let block_id = BlockId(CoreBlockId::Finalized); + let block_id = BlockId(CoreBlockId::Head); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); let (block, _, _) = block_id.full_block(&self.chain).await.unwrap(); @@ -2836,9 +2855,19 @@ impl ApiTester { let expected = IdentityData { peer_id: self.local_enr.peer_id().to_string(), - enr: self.local_enr.clone(), - p2p_addresses: self.local_enr.multiaddr_p2p_tcp(), - discovery_addresses: self.local_enr.multiaddr_p2p_udp(), + enr: self.local_enr.to_base64(), + p2p_addresses: self + .local_enr + .multiaddr_p2p_tcp() + .iter() + .map(|a| a.to_string()) + .collect(), + 
discovery_addresses: self + .local_enr + .multiaddr_p2p_udp() + .iter() + .map(|a| a.to_string()) + .collect(), metadata: MetaData::V2(MetaDataV2 { seq_number: 0, attnets: "0x0000000000000000".to_string(), @@ -2867,7 +2896,7 @@ impl ApiTester { pub async fn test_get_node_peers_by_id(self) -> Self { let result = self .client - .get_node_peers_by_id(self.external_peer_id) + .get_node_peers_by_id(&self.external_peer_id.to_string()) .await .unwrap() .data; @@ -3040,11 +3069,11 @@ impl ApiTester { assert_eq!( result.justified_checkpoint, - expected_proto_array.justified_checkpoint + beacon_fork_choice.justified_checkpoint() ); assert_eq!( result.finalized_checkpoint, - expected_proto_array.finalized_checkpoint + beacon_fork_choice.finalized_checkpoint() ); let expected_fork_choice_nodes: Vec = expected_proto_array @@ -3071,6 +3100,32 @@ impl ApiTester { .execution_status .block_hash() .map(|block_hash| block_hash.into_root()), + extra_data: ForkChoiceExtraData { + target_root: node.target_root, + justified_root: node.justified_checkpoint.root, + finalized_root: node.finalized_checkpoint.root, + unrealized_justified_root: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_finalized_root: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.root), + unrealized_justified_epoch: node + .unrealized_justified_checkpoint + .map(|checkpoint| checkpoint.epoch), + unrealized_finalized_epoch: node + .unrealized_finalized_checkpoint + .map(|checkpoint| checkpoint.epoch), + execution_status: node.execution_status.to_string(), + best_child: node + .best_child + .and_then(|index| expected_proto_array.nodes.get(index)) + .map(|child| child.root), + best_descendant: node + .best_descendant + .and_then(|index| expected_proto_array.nodes.get(index)) + .map(|descendant| descendant.root), + }, } }) .collect(); @@ -3626,7 +3681,7 @@ impl ApiTester { let (response, metadata) = self .client - .get_validator_blocks_v3_ssz::(slot, 
&randao_reveal, None, None) + .get_validator_blocks_v3_ssz::(slot, &randao_reveal, None, None, None) .await .unwrap(); @@ -4591,7 +4646,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -4618,7 +4673,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(0)) + .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(0), None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -4646,7 +4701,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(u64::MAX)) + .get_validator_blocks_v3::(slot, &randao_reveal, None, Some(u64::MAX), None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -4803,7 +4858,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -4884,7 +4939,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -4979,7 +5034,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5070,7 +5125,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - 
.get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5161,7 +5216,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5250,7 +5305,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5311,7 +5366,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5382,7 +5437,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5497,7 +5552,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5518,7 +5573,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5653,7 +5708,7 @@ impl 
ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5684,7 +5739,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(next_slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5766,7 +5821,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5840,7 +5895,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5909,7 +5964,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -5978,7 +6033,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -6045,7 +6100,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); 
Self::check_block_v3_metadata(&metadata, &payload_type); @@ -6119,7 +6174,7 @@ impl ApiTester { let (payload_type, metadata) = self .client - .get_validator_blocks_v3::(slot, &randao_reveal, None, None) + .get_validator_blocks_v3::(slot, &randao_reveal, None, None, None) .await .unwrap(); Self::check_block_v3_metadata(&metadata, &payload_type); @@ -6809,6 +6864,82 @@ impl ApiTester { } self } + + async fn get_validator_blocks_v3_path_graffiti_policy(self) -> Self { + let slot = self.chain.slot().unwrap(); + let epoch = self.chain.epoch().unwrap(); + let (_, randao_reveal) = self.get_test_randao(slot, epoch).await; + let graffiti = Some(Graffiti::from([0; GRAFFITI_BYTES_LEN])); + let builder_boost_factor = None; + + // Default case where GraffitiPolicy is None + let default_path = self + .client + .get_validator_blocks_v3_path( + slot, + &randao_reveal, + graffiti.as_ref(), + SkipRandaoVerification::Yes, + builder_boost_factor, + None, + ) + .await + .unwrap(); + + let query_default_path = default_path.query().unwrap_or(""); + // When GraffitiPolicy is None, the HTTP API query path should not contain "graffiti_policy" + assert!( + !query_default_path.contains("graffiti_policy"), + "URL should not contain graffiti_policy parameter (same as PreserveUserGraffiti). URL is: {}", + query_default_path + ); + + let preserve_path = self + .client + .get_validator_blocks_v3_path( + slot, + &randao_reveal, + graffiti.as_ref(), + SkipRandaoVerification::Yes, + builder_boost_factor, + Some(GraffitiPolicy::PreserveUserGraffiti), + ) + .await + .unwrap(); + + let query_preserve_path = preserve_path.query().unwrap_or(""); + // When GraffitiPolicy is set to PreserveUserGraffiti, the HTTP API query path should not contain "graffiti_policy" + assert!( + !query_preserve_path.contains("graffiti_policy"), + "URL should not contain graffiti_policy parameter when using PreserveUserGraffiti. 
URL is: {}", + query_preserve_path + ); + + // The HTTP API query path for PreserveUserGraffiti should be the same as the default + assert_eq!(query_default_path, query_preserve_path); + + let append_path = self + .client + .get_validator_blocks_v3_path( + slot, + &randao_reveal, + graffiti.as_ref(), + SkipRandaoVerification::No, + builder_boost_factor, + Some(GraffitiPolicy::AppendClientVersions), + ) + .await + .unwrap(); + + let query_append_path = append_path.query().unwrap_or(""); + // When GraffitiPolicy is AppendClientVersions, the HTTP API query path should contain "graffiti_policy" + assert!( + query_append_path.contains("graffiti_policy"), + "URL should contain graffiti_policy=AppendClientVersions parameter. URL is: {}", + query_append_path + ); + self + } } async fn poll_events, eth2::Error>> + Unpin, E: EthSpec>( @@ -7853,6 +7984,8 @@ async fn get_blobs_post_fulu_supernode() { config.spec.fulu_fork_epoch = Some(Epoch::new(0)); ApiTester::new_from_config(config) + .await + .test_post_beacon_blocks_valid() .await // We can call the same get_blobs function in this test // because the function will call get_blobs_by_versioned_hashes which handles peerDAS post-Fulu @@ -7873,6 +8006,8 @@ async fn get_blobs_post_fulu_full_node() { config.spec.fulu_fork_epoch = Some(Epoch::new(0)); ApiTester::new_from_config(config) + .await + .test_post_beacon_blocks_valid() .await .test_get_blobs_post_fulu_full_node(false) .await @@ -7995,3 +8130,11 @@ async fn get_beacon_rewards_blocks_electra() { .test_beacon_block_rewards_electra() .await; } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_validator_blocks_v3_http_api_path() { + ApiTester::new() + .await + .get_validator_blocks_v3_path_graffiti_policy() + .await; +} diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index 7e69f6770bf..efb6f27dc52 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml 
@@ -3,6 +3,7 @@ name = "lighthouse_network" version = "0.2.0" authors = ["Sigma Prime "] edition = { workspace = true } +autotests = false [features] libp2p-websocket = [] @@ -10,15 +11,17 @@ libp2p-websocket = [] [dependencies] alloy-primitives = { workspace = true } alloy-rlp = { workspace = true } +bls = { workspace = true } bytes = { workspace = true } delay_map = { workspace = true } directory = { workspace = true } dirs = { workspace = true } discv5 = { workspace = true } either = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } fnv = { workspace = true } futures = { workspace = true } gossipsub = { workspace = true } @@ -48,6 +51,7 @@ tokio = { workspace = true } tokio-util = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } unsigned-varint = { version = "0.8", features = ["codec"] } @@ -64,7 +68,6 @@ features = [ "plaintext", "secp256k1", "macros", - "ecdsa", "metrics", "quic", "upnp", @@ -73,6 +76,9 @@ features = [ [dev-dependencies] async-channel = { workspace = true } logging = { workspace = true } -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } +proptest = { workspace = true } tempfile = { workspace = true } + +[[test]] +name = "lighthouse_network_tests" +path = "tests/main.rs" diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 89c6c58d4f6..416ca73e08e 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -94,9 +94,6 @@ pub struct Config { /// Attempt to construct external port mappings with UPnP. pub upnp_enabled: bool, - /// Subscribe to all data column subnets for the duration of the runtime. 
- pub subscribe_all_data_column_subnets: bool, - /// Subscribe to all subnets for the duration of the runtime. pub subscribe_all_subnets: bool, @@ -355,7 +352,6 @@ impl Default for Config { upnp_enabled: true, network_load: 3, private: false, - subscribe_all_data_column_subnets: false, subscribe_all_subnets: false, import_all_attestations: false, shutdown_after_sync: false, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index bb9ff299c5d..4c285ea86c8 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -159,7 +159,7 @@ pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, enr_fork_id: &EnrForkId, - custody_group_count: Option, + custody_group_count: u64, next_fork_digest: [u8; 4], spec: &ChainSpec, ) -> Result { @@ -185,7 +185,7 @@ pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, enr_fork_id: &EnrForkId, - custody_group_count: Option, + custody_group_count: u64, next_fork_digest: [u8; 4], spec: &ChainSpec, ) -> Result { @@ -280,15 +280,6 @@ pub fn build_enr( // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled if spec.is_peer_das_scheduled() { - let custody_group_count = if let Some(cgc) = custody_group_count { - cgc - } else if let Some(false_cgc) = config.advertise_false_custody_group_count { - false_cgc - } else if config.subscribe_all_data_column_subnets { - spec.number_of_custody_groups - } else { - spec.custody_requirement - }; builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &next_fork_digest); } @@ -373,7 +364,7 @@ mod test { fn build_enr_with_config( config: NetworkConfig, - cgc: Option, + cgc: u64, spec: &ChainSpec, ) -> (Enr, CombinedKey) { let keypair = libp2p::identity::secp256k1::Keypair::generate(); @@ -386,56 +377,23 @@ mod test { #[test] fn test_nfd_enr_encoding() { let spec = 
make_fulu_spec(); - let enr = build_enr_with_config(NetworkConfig::default(), None, &spec).0; + let enr = + build_enr_with_config(NetworkConfig::default(), spec.custody_requirement, &spec).0; assert_eq!(enr.next_fork_digest().unwrap(), TEST_NFD); } - #[test] - fn custody_group_count_default() { - let config = NetworkConfig { - subscribe_all_data_column_subnets: false, - ..NetworkConfig::default() - }; - let spec = make_fulu_spec(); - - let enr = build_enr_with_config(config, None, &spec).0; - - assert_eq!( - enr.custody_group_count::(&spec).unwrap(), - spec.custody_requirement, - ); - } - - #[test] - fn custody_group_count_all() { - let config = NetworkConfig { - subscribe_all_data_column_subnets: true, - ..NetworkConfig::default() - }; - let spec = make_fulu_spec(); - let enr = build_enr_with_config(config, None, &spec).0; - - assert_eq!( - enr.custody_group_count::(&spec).unwrap(), - spec.number_of_custody_groups, - ); - } - #[test] fn custody_group_value() { - let config = NetworkConfig { - subscribe_all_data_column_subnets: true, - ..NetworkConfig::default() - }; + let config = NetworkConfig::default(); let spec = make_fulu_spec(); - let enr = build_enr_with_config(config, Some(42), &spec).0; + let enr = build_enr_with_config(config, 42, &spec).0; assert_eq!(enr.custody_group_count::(&spec).unwrap(), 42); } #[test] fn test_encode_decode_eth2_enr() { - let (enr, _key) = build_enr_with_config(NetworkConfig::default(), None, &E::default_spec()); + let (enr, _key) = build_enr_with_config(NetworkConfig::default(), 4, &E::default_spec()); // Check all Eth2 Mappings are decodeable enr.eth2().unwrap(); enr.attestation_bitfield::().unwrap(); diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 49de62546df..a8c87523a54 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1231,7 +1231,8 @@ mod tests { use super::*; use 
crate::rpc::methods::{MetaData, MetaDataV3}; use libp2p::identity::secp256k1; - use types::{BitVector, MinimalEthSpec, SubnetId}; + use ssz_types::BitVector; + use types::{MinimalEthSpec, SubnetId}; type E = MinimalEthSpec; @@ -1243,11 +1244,12 @@ mod tests { let config = Arc::new(config); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); let next_fork_digest = [0; 4]; + let custody_group_count = spec.custody_requirement; let enr: Enr = build_enr::( &enr_key, &config, &EnrForkId::default(), - None, + custody_group_count, next_fork_digest, &spec, ) @@ -1258,7 +1260,7 @@ mod tests { seq_number: 0, attnets: Default::default(), syncnets: Default::default(), - custody_group_count: spec.custody_requirement, + custody_group_count, }), vec![], false, diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index ad16bb0421c..3cfe2b3c3b7 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -2975,11 +2975,11 @@ mod tests { use crate::peer_manager::tests::build_peer_manager_with_trusted_peers; use crate::rpc::{MetaData, MetaDataV3}; use libp2p::PeerId; - use quickcheck::{Arbitrary, Gen, TestResult}; - use quickcheck_macros::quickcheck; + use proptest::prelude::*; use std::collections::HashSet; use tokio::runtime::Runtime; - use types::{DataColumnSubnetId, Unsigned}; + use typenum::Unsigned; + use types::DataColumnSubnetId; use types::{EthSpec, MainnetEthSpec as E}; #[derive(Clone, Debug)] @@ -2994,159 +2994,202 @@ mod tests { custody_subnets: HashSet, } - impl Arbitrary for PeerCondition { - fn arbitrary(g: &mut Gen) -> Self { - let attestation_net_bitfield = { - let len = ::SubnetBitfieldLength::to_usize(); - let mut bitfield = Vec::with_capacity(len); - for _ in 0..len { - bitfield.push(bool::arbitrary(g)); - } - bitfield - }; - - let sync_committee_net_bitfield = { - let len = 
::SyncCommitteeSubnetCount::to_usize(); - let mut bitfield = Vec::with_capacity(len); - for _ in 0..len { - bitfield.push(bool::arbitrary(g)); - } - bitfield - }; - - let spec = E::default_spec(); - let custody_subnets = { - let total_subnet_count = spec.data_column_sidecar_subnet_count; - let custody_subnet_count = u64::arbitrary(g) % (total_subnet_count + 1); // 0 to 128 - (spec.custody_requirement..total_subnet_count) - .filter(|_| bool::arbitrary(g)) - .map(DataColumnSubnetId::new) - .take(custody_subnet_count as usize) - .collect() - }; - - PeerCondition { - peer_id: PeerId::random(), - outgoing: bool::arbitrary(g), - attestation_net_bitfield, - sync_committee_net_bitfield, - score: f64::arbitrary(g), - trusted: bool::arbitrary(g), - gossipsub_score: f64::arbitrary(g), - custody_subnets, - } - } - } - - #[quickcheck] - fn prune_excess_peers(peer_conditions: Vec) -> TestResult { - let target_peer_count = DEFAULT_TARGET_PEERS; + fn peer_condition_strategy() -> impl Strategy { + let attestation_len = ::SubnetBitfieldLength::to_usize(); + let sync_committee_len = ::SyncCommitteeSubnetCount::to_usize(); let spec = E::default_spec(); - if peer_conditions.len() < target_peer_count { - return TestResult::discard(); - } - let trusted_peers: Vec<_> = peer_conditions - .iter() - .filter_map(|p| if p.trusted { Some(p.peer_id) } else { None }) - .collect(); - // If we have a high percentage of trusted peers, it is very difficult to reason about - // the expected results of the pruning. 
- if trusted_peers.len() > peer_conditions.len() / 3_usize { - return TestResult::discard(); - } - let rt = Runtime::new().unwrap(); - - rt.block_on(async move { - // Collect all the trusted peers - let mut peer_manager = - build_peer_manager_with_trusted_peers(trusted_peers, target_peer_count).await; + let total_subnet_count = spec.data_column_sidecar_subnet_count; + let custody_requirement = spec.custody_requirement; + + // Create the pool of available subnet IDs + let available_subnets: Vec = (custody_requirement..total_subnet_count).collect(); + let max_custody_subnets = available_subnets.len(); + + // Trusted peer probability constants - 1 in 5 peers should be trusted (20%) + const TRUSTED_PEER_WEIGHT_FALSE: u32 = 4; + const TRUSTED_PEER_WEIGHT_TRUE: u32 = 1; + + ( + proptest::collection::vec(any::(), attestation_len), + proptest::collection::vec(any::(), sync_committee_len), + any::(), + any::(), + any::(), + // Weight trusted peers to avoid test rejection due to too many trusted peers + prop_oneof![ + TRUSTED_PEER_WEIGHT_FALSE => Just(false), + TRUSTED_PEER_WEIGHT_TRUE => Just(true), + ], + 0..=max_custody_subnets, + ) + .prop_flat_map( + move |( + attestation_net_bitfield, + sync_committee_net_bitfield, + score, + outgoing, + gossipsub_score, + trusted, + custody_subnet_count, + )| { + // Use proptest's subsequence to select a random subset of subnets + let custody_subnets_strategy = proptest::sample::subsequence( + available_subnets.clone(), + custody_subnet_count, + ); - // Create peers based on the randomly generated conditions. 
- for condition in &peer_conditions { - let mut attnets = crate::types::EnrAttestationBitfield::::new(); - let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + ( + Just(attestation_net_bitfield), + Just(sync_committee_net_bitfield), + Just(score), + Just(outgoing), + Just(gossipsub_score), + Just(trusted), + custody_subnets_strategy, + ) + }, + ) + .prop_map( + |( + attestation_net_bitfield, + sync_committee_net_bitfield, + score, + outgoing, + gossipsub_score, + trusted, + custody_subnets_vec, + )| { + let custody_subnets: HashSet = custody_subnets_vec + .into_iter() + .map(DataColumnSubnetId::new) + .collect(); + + PeerCondition { + peer_id: PeerId::random(), + outgoing, + attestation_net_bitfield, + sync_committee_net_bitfield, + score, + trusted, + gossipsub_score, + custody_subnets, + } + }, + ) + } - if condition.outgoing { - peer_manager.inject_connect_outgoing( - &condition.peer_id, - "/ip4/0.0.0.0".parse().unwrap(), - None, - ); - } else { - peer_manager.inject_connect_ingoing( - &condition.peer_id, - "/ip4/0.0.0.0".parse().unwrap(), - None, - ); - } + // Upper bound for testing peer pruning - we test with at least the target number + // and up to 50% more than the target to verify pruning behavior. + const MAX_TEST_PEERS: usize = 300; - for (i, value) in condition.attestation_net_bitfield.iter().enumerate() { - attnets.set(i, *value).unwrap(); - } + proptest! 
{ + #[test] + fn prune_excess_peers(peer_conditions in proptest::collection::vec(peer_condition_strategy(), DEFAULT_TARGET_PEERS..=MAX_TEST_PEERS)) { + let target_peer_count = DEFAULT_TARGET_PEERS; + let spec = E::default_spec(); - for (i, value) in condition.sync_committee_net_bitfield.iter().enumerate() { - syncnets.set(i, *value).unwrap(); - } + let trusted_peers: Vec<_> = peer_conditions + .iter() + .filter_map(|p| if p.trusted { Some(p.peer_id) } else { None }) + .collect(); + // If we have a high percentage of trusted peers, it is very difficult to reason about + // the expected results of the pruning. + prop_assume!(trusted_peers.len() <= peer_conditions.len() / 3_usize); + + let rt = Runtime::new().unwrap(); + + let result = rt.block_on(async move { + // Collect all the trusted peers + let mut peer_manager = + build_peer_manager_with_trusted_peers(trusted_peers, target_peer_count).await; + + // Create peers based on the randomly generated conditions. + for condition in &peer_conditions { + let mut attnets = crate::types::EnrAttestationBitfield::::new(); + let mut syncnets = crate::types::EnrSyncCommitteeBitfield::::new(); + + if condition.outgoing { + peer_manager.inject_connect_outgoing( + &condition.peer_id, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + } else { + peer_manager.inject_connect_ingoing( + &condition.peer_id, + "/ip4/0.0.0.0".parse().unwrap(), + None, + ); + } - let subnets_per_custody_group = - spec.data_column_sidecar_subnet_count / spec.number_of_custody_groups; - let metadata = MetaDataV3 { - seq_number: 0, - attnets, - syncnets, - custody_group_count: condition.custody_subnets.len() as u64 - / subnets_per_custody_group, - }; + for (i, value) in condition.attestation_net_bitfield.iter().enumerate() { + attnets.set(i, *value).unwrap(); + } - let mut peer_db = peer_manager.network_globals.peers.write(); - let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); - peer_info.set_meta_data(MetaData::V3(metadata)); - 
peer_info.set_gossipsub_score(condition.gossipsub_score); - peer_info.add_to_score(condition.score); - peer_info.set_custody_subnets(condition.custody_subnets.clone()); + for (i, value) in condition.sync_committee_net_bitfield.iter().enumerate() { + syncnets.set(i, *value).unwrap(); + } - for subnet in peer_info.long_lived_subnets() { - peer_db.add_subscription(&condition.peer_id, subnet); + let subnets_per_custody_group = + spec.data_column_sidecar_subnet_count / spec.number_of_custody_groups; + let metadata = MetaDataV3 { + seq_number: 0, + attnets, + syncnets, + custody_group_count: condition.custody_subnets.len() as u64 + / subnets_per_custody_group, + }; + + let mut peer_db = peer_manager.network_globals.peers.write(); + let peer_info = peer_db.peer_info_mut(&condition.peer_id).unwrap(); + peer_info.set_meta_data(MetaData::V3(metadata)); + peer_info.set_gossipsub_score(condition.gossipsub_score); + peer_info.add_to_score(condition.score); + peer_info.set_custody_subnets(condition.custody_subnets.clone()); + + for subnet in peer_info.long_lived_subnets() { + peer_db.add_subscription(&condition.peer_id, subnet); + } } - } - - // Perform the heartbeat. - peer_manager.heartbeat(); - // The minimum number of connected peers cannot be less than the target peer count - // or submitted peers. + // Perform the heartbeat. + peer_manager.heartbeat(); + + // The minimum number of connected peers cannot be less than the target peer count + // or submitted peers. + + let expected_peer_count = target_peer_count.min(peer_conditions.len()); + // Trusted peers could make this larger however. 
+ let no_of_trusted_peers = peer_conditions + .iter() + .filter(|condition| condition.trusted) + .count(); + let expected_peer_count = expected_peer_count.max(no_of_trusted_peers); + + let target_peer_condition = + peer_manager.network_globals.connected_or_dialing_peers() + == expected_peer_count; + + // It could be that we reach our target outbound limit and are unable to prune any + // extra, which violates the target_peer_condition. + let outbound_peers = peer_manager.network_globals.connected_outbound_only_peers(); + let hit_outbound_limit = outbound_peers == peer_manager.target_outbound_peers(); + + // No trusted peers should be disconnected + let trusted_peer_disconnected = peer_conditions.iter().any(|condition| { + condition.trusted + && !peer_manager + .network_globals + .peers + .read() + .is_connected(&condition.peer_id) + }); - let expected_peer_count = target_peer_count.min(peer_conditions.len()); - // Trusted peers could make this larger however. - let no_of_trusted_peers = peer_conditions - .iter() - .filter(|condition| condition.trusted) - .count(); - let expected_peer_count = expected_peer_count.max(no_of_trusted_peers); - - let target_peer_condition = - peer_manager.network_globals.connected_or_dialing_peers() - == expected_peer_count; - - // It could be that we reach our target outbound limit and are unable to prune any - // extra, which violates the target_peer_condition. 
- let outbound_peers = peer_manager.network_globals.connected_outbound_only_peers(); - let hit_outbound_limit = outbound_peers == peer_manager.target_outbound_peers(); - - // No trusted peers should be disconnected - let trusted_peer_disconnected = peer_conditions.iter().any(|condition| { - condition.trusted - && !peer_manager - .network_globals - .peers - .read() - .is_connected(&condition.peer_id) + (target_peer_condition || hit_outbound_limit) && !trusted_peer_disconnected }); - TestResult::from_bool( - (target_peer_condition || hit_outbound_limit) && !trusted_peer_disconnected, - ) - }) + prop_assert!(result); + } } } diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index acb01884564..48a29699c8f 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -8,7 +8,7 @@ use libp2p::bytes::BytesMut; use snap::read::FrameDecoder; use snap::write::FrameEncoder; use ssz::{Decode, Encode}; -use ssz_types::VariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use std::io::Cursor; use std::io::ErrorKind; use std::io::{Read, Write}; @@ -18,10 +18,10 @@ use tokio_util::codec::{Decoder, Encoder}; use types::{ BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, SignedBeaconBlockGloas, + LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, SignedBeaconBlockAltair, + SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, + SignedBeaconBlockGloas, }; use 
unsigned_varint::codec::Uvi; @@ -908,11 +908,12 @@ mod tests { use super::*; use crate::rpc::protocol::*; use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; + use bls::Signature; + use fixed_bytes::FixedBytesExtended; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, - DataColumnsByRootIdentifier, EmptyBlock, Epoch, FixedBytesExtended, FullPayload, - KzgCommitment, KzgProof, Signature, SignedBeaconBlockHeader, Slot, - blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, + DataColumnsByRootIdentifier, EmptyBlock, Epoch, FullPayload, KzgCommitment, KzgProof, + SignedBeaconBlockHeader, Slot, blob_sidecar::BlobIdentifier, data_column_sidecar::Cell, }; type Spec = types::MainnetEthSpec; @@ -1002,8 +1003,9 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); + let tx = VariableList::try_from(vec![0; 1024]).unwrap(); + let txs = + VariableList::try_from(std::iter::repeat_n(tx, 5000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; @@ -1021,8 +1023,9 @@ mod tests { let mut block: BeaconBlockBellatrix<_, FullPayload> = BeaconBlockBellatrix::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); + let tx = VariableList::try_from(vec![0; 1024]).unwrap(); + let txs = + VariableList::try_from(std::iter::repeat_n(tx, 100000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; @@ -1080,7 +1083,7 @@ mod tests { data_column_ids: RuntimeVariableList::new( vec![DataColumnsByRootIdentifier { block_root: Hash256::zero(), - columns: VariableList::from(vec![0, 1, 2]), + columns: VariableList::try_from(vec![0, 1, 2]).unwrap(), }], spec.max_request_blocks(fork_name), ) diff --git 
a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 9319973e597..a9b4aa2fbad 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -5,7 +5,7 @@ use regex::bytes::Regex; use serde::Serialize; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::{VariableList, typenum::U256}; +use ssz_types::{RuntimeVariableList, VariableList, typenum::U256}; use std::fmt::Display; use std::marker::PhantomData; use std::ops::Deref; @@ -17,7 +17,7 @@ use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; use types::{ ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnsByRootIdentifier, Epoch, EthSpec, ForkContext, Hash256, LightClientBootstrap, LightClientFinalityUpdate, - LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, + LightClientOptimisticUpdate, LightClientUpdate, SignedBeaconBlock, Slot, blob_sidecar::BlobSidecar, }; @@ -29,15 +29,21 @@ pub const MAX_ERROR_LEN: u64 = 256; #[derive(Debug, Clone)] pub struct ErrorType(pub VariableList); -impl From for ErrorType { - fn from(s: String) -> Self { - Self(VariableList::from(s.as_bytes().to_vec())) +impl From<&str> for ErrorType { + // This will truncate the error if `string.as_bytes()` exceeds `MaxErrorLen`. 
+ fn from(s: &str) -> Self { + let mut bytes = s.as_bytes().to_vec(); + bytes.truncate(MAX_ERROR_LEN as usize); + Self( + VariableList::try_from(bytes) + .expect("length should not exceed MaxErrorLen after truncation"), + ) } } -impl From<&str> for ErrorType { - fn from(s: &str) -> Self { - Self(VariableList::from(s.as_bytes().to_vec())) +impl From for ErrorType { + fn from(s: String) -> Self { + Self::from(s.as_str()) } } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 228a74f08cc..366515d42f6 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -1,5 +1,6 @@ use super::methods::*; use crate::rpc::codec::SSZSnappyInboundCodec; +use bls::Signature; use futures::future::BoxFuture; use futures::prelude::{AsyncRead, AsyncWrite}; use futures::{FutureExt, StreamExt}; @@ -20,7 +21,7 @@ use types::{ EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, + MainnetEthSpec, MinimalEthSpec, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -70,13 +71,15 @@ pub static BLOB_SIDECAR_SIZE_MINIMAL: LazyLock = LazyLock::new(BlobSidecar::::max_size); pub static ERROR_TYPE_MIN: LazyLock = LazyLock::new(|| { - VariableList::::from(Vec::::new()) + VariableList::::try_from(Vec::::new()) + .expect("MaxErrorLen should not exceed MAX_ERROR_LEN") .as_ssz_bytes() .len() }); pub static ERROR_TYPE_MAX: LazyLock = LazyLock::new(|| { - VariableList::::from(vec![0u8; MAX_ERROR_LEN as usize]) + VariableList::::try_from(vec![0u8; MAX_ERROR_LEN as usize]) + .expect("MaxErrorLen should not exceed MAX_ERROR_LEN") .as_ssz_bytes() .len() }); 
diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 93c69ee097b..4eebda1decb 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -199,7 +199,7 @@ impl Network { local_keypair.clone(), &config, &ctx.enr_fork_id, - Some(advertised_cgc), + advertised_cgc, next_fork_digest, &ctx.chain_spec, )?; diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index a0026837e37..63f22be5e2c 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -41,7 +41,7 @@ pub fn build_transport( quic_support: bool, ) -> std::io::Result { // mplex config - let mut mplex_config = libp2p_mplex::MplexConfig::new(); + let mut mplex_config = libp2p_mplex::Config::new(); mplex_config.set_max_buffer_size(256); mplex_config.set_max_buffer_behaviour(libp2p_mplex::MaxBufferBehaviour::Block); diff --git a/beacon_node/lighthouse_network/src/types/globals.rs b/beacon_node/lighthouse_network/src/types/globals.rs index 2a3571c3b70..f46eb05ceb0 100644 --- a/beacon_node/lighthouse_network/src/types/globals.rs +++ b/beacon_node/lighthouse_network/src/types/globals.rs @@ -227,7 +227,6 @@ impl NetworkGlobals { TopicConfig { enable_light_client_server: self.config.enable_light_client_server, subscribe_all_subnets: self.config.subscribe_all_subnets, - subscribe_all_data_column_subnets: self.config.subscribe_all_data_column_subnets, sampling_subnets: self.sampling_subnets.read().clone(), } } diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 3f57406fc78..eea8782b2d5 100644 --- a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -3,7 +3,8 @@ mod pubsub; mod subnet; mod topics; -use types::{BitVector, EthSpec}; +use 
ssz_types::BitVector; +use types::EthSpec; pub type EnrAttestationBitfield = BitVector<::SubnetBitfieldLength>; pub type EnrSyncCommitteeBitfield = BitVector<::SyncCommitteeSubnetCount>; diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index b22adfbc487..0c988f35c39 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -2,7 +2,8 @@ use gossipsub::{IdentTopic as Topic, TopicHash}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use strum::AsRefStr; -use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId, Unsigned}; +use typenum::Unsigned; +use types::{ChainSpec, DataColumnSubnetId, EthSpec, ForkName, SubnetId, SyncSubnetId}; use crate::Subnet; @@ -29,7 +30,6 @@ pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic_update pub struct TopicConfig { pub enable_light_client_server: bool, pub subscribe_all_subnets: bool, - pub subscribe_all_data_column_subnets: bool, pub sampling_subnets: HashSet, } @@ -80,14 +80,8 @@ pub fn core_topics_to_subscribe( } if fork_name.fulu_enabled() { - if opts.subscribe_all_data_column_subnets { - for i in 0..spec.data_column_sidecar_subnet_count { - topics.push(GossipKind::DataColumnSidecar(i.into())); - } - } else { - for subnet in &opts.sampling_subnets { - topics.push(GossipKind::DataColumnSidecar(*subnet)); - } + for subnet in &opts.sampling_subnets { + topics.push(GossipKind::DataColumnSidecar(*subnet)); } } @@ -125,7 +119,6 @@ pub fn all_topics_at_fork(fork: ForkName, spec: &ChainSpec) -> Vec(fork, &opts, spec) @@ -520,7 +513,6 @@ mod tests { TopicConfig { enable_light_client_server: false, subscribe_all_subnets: false, - subscribe_all_data_column_subnets: false, sampling_subnets: sampling_subnets.clone(), } } @@ -552,9 +544,8 @@ mod tests { #[test] fn columns_are_subscribed_in_peerdas() { let spec = get_spec(); - let s = 
get_sampling_subnets(); - let mut topic_config = get_topic_config(&s); - topic_config.subscribe_all_data_column_subnets = true; + let s = HashSet::from_iter([0.into()]); + let topic_config = get_topic_config(&s); assert!( core_topics_to_subscribe::(ForkName::Fulu, &topic_config, &spec) .contains(&GossipKind::DataColumnSidecar(0.into())) diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 9e8b243698b..412ee5aca5a 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -1,4 +1,5 @@ #![cfg(test)] +use fixed_bytes::FixedBytesExtended; use lighthouse_network::Enr; use lighthouse_network::Multiaddr; use lighthouse_network::service::Network as LibP2PService; @@ -9,10 +10,7 @@ use std::sync::Weak; use tokio::runtime::Runtime; use tracing::{Instrument, debug, error, info_span}; use tracing_subscriber::EnvFilter; -use types::{ - ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256, - MinimalEthSpec, -}; +use types::{ChainSpec, EnrForkId, Epoch, EthSpec, ForkContext, ForkName, Hash256, MinimalEthSpec}; type E = MinimalEthSpec; @@ -109,7 +107,7 @@ pub fn build_config( config.set_ipv4_listening_address(std::net::Ipv4Addr::UNSPECIFIED, port, port, port); config.enr_address = (Some(std::net::Ipv4Addr::LOCALHOST), None); config.boot_nodes_enr.append(&mut boot_nodes); - config.network_dir = path.into_path(); + config.network_dir = path.keep(); config.disable_peer_scoring = disable_peer_scoring; config.inbound_rate_limiter_config = inbound_rate_limiter; Arc::new(config) diff --git a/beacon_node/lighthouse_network/tests/main.rs b/beacon_node/lighthouse_network/tests/main.rs new file mode 100644 index 00000000000..2ed0eabaff7 --- /dev/null +++ b/beacon_node/lighthouse_network/tests/main.rs @@ -0,0 +1,2 @@ +mod common; +mod rpc_tests; diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs 
b/beacon_node/lighthouse_network/tests/rpc_tests.rs index e37f4131a76..599fcd242bf 100644 --- a/beacon_node/lighthouse_network/tests/rpc_tests.rs +++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs @@ -1,14 +1,15 @@ #![cfg(test)] -mod common; - +use crate::common; use crate::common::spec_with_all_forks_enabled; -use common::{Protocol, build_tracing_subscriber}; +use crate::common::{Protocol, build_tracing_subscriber}; +use bls::Signature; +use fixed_bytes::FixedBytesExtended; use lighthouse_network::rpc::{RequestType, methods::*}; use lighthouse_network::service::api_types::AppRequestId; use lighthouse_network::{NetworkEvent, ReportSource, Response}; use ssz::Encode; -use ssz_types::VariableList; +use ssz_types::{RuntimeVariableList, VariableList}; use std::sync::Arc; use std::time::{Duration, Instant}; use tokio::runtime::Runtime; @@ -17,8 +18,8 @@ use tracing::{Instrument, debug, error, info_span, warn}; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockHeader, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnsByRootIdentifier, EmptyBlock, Epoch, - EthSpec, FixedBytesExtended, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, - RuntimeVariableList, Signature, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + EthSpec, ForkName, Hash256, KzgCommitment, KzgProof, MinimalEthSpec, SignedBeaconBlock, + SignedBeaconBlockHeader, Slot, }; type E = MinimalEthSpec; @@ -26,8 +27,8 @@ type E = MinimalEthSpec; /// Bellatrix block with length < max_rpc_size. 
fn bellatrix_block_small(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); + let tx = VariableList::try_from(vec![0; 1024]).unwrap(); + let txs = VariableList::try_from(std::iter::repeat_n(tx, 5000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; @@ -41,8 +42,8 @@ fn bellatrix_block_small(spec: &ChainSpec) -> BeaconBlock { /// Hence, we generate a bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. fn bellatrix_block_large(spec: &ChainSpec) -> BeaconBlock { let mut block = BeaconBlockBellatrix::::empty(spec); - let tx = VariableList::from(vec![0; 1024]); - let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); + let tx = VariableList::try_from(vec![0; 1024]).unwrap(); + let txs = VariableList::try_from(std::iter::repeat_n(tx, 100000).collect::>()).unwrap(); block.body.execution_payload.execution_payload.transactions = txs; @@ -1018,14 +1019,17 @@ fn test_tcp_columns_by_root_chunked_rpc() { }, signature: Signature::empty(), }, - column: vec![vec![0; E::bytes_per_blob()].into()].into(), - kzg_commitments: vec![KzgCommitment::empty_for_testing()].into(), - kzg_proofs: vec![KzgProof::empty()].into(), + column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + .try_into() + .unwrap(), + kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), kzg_commitments_inclusion_proof: vec![ Hash256::zero(); E::kzg_commitments_inclusion_proof_depth() ] - .into(), + .try_into() + .unwrap(), }); let rpc_response = Response::DataColumnsByRoot(Some(data_column.clone())); @@ -1160,14 +1164,17 @@ fn test_tcp_columns_by_range_chunked_rpc() { }, signature: Signature::empty(), }, - column: vec![vec![0; E::bytes_per_blob()].into()].into(), - 
kzg_commitments: vec![KzgCommitment::empty_for_testing()].into(), - kzg_proofs: vec![KzgProof::empty()].into(), + column: vec![vec![0; E::bytes_per_cell()].try_into().unwrap()] + .try_into() + .unwrap(), + kzg_commitments: vec![KzgCommitment::empty_for_testing()].try_into().unwrap(), + kzg_proofs: vec![KzgProof::empty()].try_into().unwrap(), kzg_commitments_inclusion_proof: vec![ Hash256::zero(); E::kzg_commitments_inclusion_proof_depth() ] - .into(), + .try_into() + .unwrap(), }); let rpc_response = Response::DataColumnsByRange(Some(data_column.clone())); diff --git a/beacon_node/network/Cargo.toml b/beacon_node/network/Cargo.toml index 5615148648d..bf261965760 100644 --- a/beacon_node/network/Cargo.toml +++ b/beacon_node/network/Cargo.toml @@ -19,9 +19,10 @@ async-channel = { workspace = true } beacon_chain = { workspace = true } beacon_processor = { workspace = true } delay_map = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } fnv = { workspace = true } futures = { workspace = true } hex = { workspace = true } @@ -45,6 +46,7 @@ tokio = { workspace = true } tokio-stream = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index a492ece5080..eb70147c6ef 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -735,12 +735,11 @@ impl NetworkBeaconProcessor { // Data column is available via either the EL or reconstruction. // Do not penalise the peer. // Gossip filter should filter any duplicates received after this. 
- debug!( - %slot, - %block_root, - %index, - "Received already available column sidecar. Ignoring the column sidecar" - ) + self.propagate_validation_result( + message_id, + peer_id, + MessageAcceptance::Ignore, + ); } GossipDataColumnError::FutureSlot { .. } | GossipDataColumnError::PastFinalizedSlot { .. } => { @@ -2742,6 +2741,20 @@ impl NetworkBeaconProcessor { } } } + AttnError::SszTypesError(_) => { + error!( + %peer_id, + block = ?beacon_block_root, + ?attestation_type, + "Rejecting attestation due to a critical SSZ types error" + ); + self.propagate_validation_result(message_id, peer_id, MessageAcceptance::Reject); + self.gossip_penalize_peer( + peer_id, + PeerAction::MidToleranceError, + "attn_ssz_types_error", + ); + } } debug!( diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 5fa2361f280..bebda36d71c 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -866,6 +866,9 @@ impl NetworkBeaconProcessor { "Reconstruction not required for block" ); } + Err(BlockError::DuplicateFullyImported(_)) => { + debug!("Block already imported in parallel with reconstruction"); + } Err(e) => { error!( %block_root, diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index a9794cb5c42..ed04fe7bb97 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -33,6 +33,7 @@ use lighthouse_network::{ }; use matches::assert_matches; use slot_clock::SlotClock; +use ssz_types::RuntimeVariableList; use std::collections::HashSet; use std::iter::Iterator; use std::sync::Arc; @@ -42,8 +43,8 @@ use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; use types::{ AttesterSlashing, BlobSidecar, BlobSidecarList, ChainSpec, DataColumnSidecarList, DataColumnSubnetId, 
Epoch, EthSpec, Hash256, MainnetEthSpec, ProposerSlashing, - RuntimeVariableList, SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, - SingleAttestation, Slot, SubnetId, + SignedAggregateAndProof, SignedBeaconBlock, SignedVoluntaryExit, SingleAttestation, Slot, + SubnetId, }; type E = MainnetEthSpec; @@ -916,36 +917,29 @@ async fn data_column_reconstruction_at_deadline() { .start_of(rig.next_block.slot()) .unwrap(); - rig.chain - .slot_clock - .set_current_time(slot_start - rig.chain.spec.maximum_gossip_clock_disparity()); - - assert_eq!( - rig.chain.slot().unwrap(), - rig.next_block.slot() - 1, - "chain should be at the correct slot" - ); - // We push the slot clock to 3 seconds into the slot, this is the deadline to trigger reconstruction. + let slot_duration = rig.chain.slot_clock.slot_duration().as_millis() as u64; + let reconstruction_deadline_millis = + (slot_duration * RECONSTRUCTION_DEADLINE.0) / RECONSTRUCTION_DEADLINE.1; rig.chain .slot_clock - .set_current_time(slot_start + Duration::from_secs(3)); + .set_current_time(slot_start + Duration::from_millis(reconstruction_deadline_millis)); - let num_data_columns = rig.next_data_columns.as_ref().map(|c| c.len()).unwrap_or(0); - for i in 0..num_data_columns { + let min_columns_for_reconstruction = E::number_of_columns() / 2; + for i in 0..min_columns_for_reconstruction { rig.enqueue_gossip_data_columns(i); rig.assert_event_journal_completes(&[WorkType::GossipDataColumnSidecar]) .await; } // Since we're at the reconstruction deadline, reconstruction should be triggered immediately - if num_data_columns > 0 { - rig.assert_event_journal_completes_with_timeout( - &[WorkType::ColumnReconstruction], - Duration::from_millis(50), - ) - .await; - } + rig.assert_event_journal_with_timeout( + &[WorkType::ColumnReconstruction.into()], + Duration::from_millis(50), + false, + false, + ) + .await; } // Test the column reconstruction is delayed for columns that arrive for a previous slot. 
@@ -1705,8 +1699,9 @@ async fn test_blobs_by_range_spans_fulu_fork() { spec.fulu_fork_epoch = Some(Epoch::new(1)); spec.gloas_fork_epoch = Some(Epoch::new(2)); + // This test focuses on Electra→Fulu blob counts (epoch 0 to 1). Build 62 blocks since no need for Gloas activation at slot 64. let mut rig = TestRig::new_parametric( - 64, + 62, BeaconProcessorConfig::default(), NodeCustodyType::Fullnode, spec, diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index a416f5cb123..0869b442aec 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -37,8 +37,9 @@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use tracing::{debug, error, info, trace, warn}; +use typenum::Unsigned; use types::{ - EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, Unsigned, + EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, ValidatorSubscription, }; diff --git a/beacon_node/network/src/status.rs b/beacon_node/network/src/status.rs index ebf5c1829e5..c571a40485c 100644 --- a/beacon_node/network/src/status.rs +++ b/beacon_node/network/src/status.rs @@ -1,5 +1,6 @@ use beacon_chain::{BeaconChain, BeaconChainTypes}; -use types::{EthSpec, FixedBytesExtended, Hash256}; +use fixed_bytes::FixedBytesExtended; +use types::{EthSpec, Hash256}; use lighthouse_network::rpc::{StatusMessage, methods::StatusMessageV2}; /// Trait to produce a `StatusMessage` representing the state of the given `beacon_chain`. 
diff --git a/beacon_node/network/src/sync/batch.rs b/beacon_node/network/src/sync/batch.rs index ea0ef15f4b2..8de386f5be2 100644 --- a/beacon_node/network/src/sync/batch.rs +++ b/beacon_node/network/src/sync/batch.rs @@ -1,5 +1,5 @@ use beacon_chain::block_verification_types::RpcBlock; -use derivative::Derivative; +use educe::Educe; use lighthouse_network::PeerId; use lighthouse_network::rpc::methods::BlocksByRangeRequest; use lighthouse_network::rpc::methods::DataColumnsByRangeRequest; @@ -78,8 +78,8 @@ pub enum BatchProcessingResult { NonFaultyFailure, } -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] /// A segment of a chain. pub struct BatchInfo { /// Start slot of the batch. @@ -97,7 +97,7 @@ pub struct BatchInfo { /// Whether this batch contains all blocks or all blocks and blobs. batch_type: ByRangeRequestType, /// Pin the generic - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] marker: std::marker::PhantomData<(E, B)>, } diff --git a/beacon_node/network/src/sync/block_lookups/parent_chain.rs b/beacon_node/network/src/sync/block_lookups/parent_chain.rs index 551a0261f2c..5deea1dd94e 100644 --- a/beacon_node/network/src/sync/block_lookups/parent_chain.rs +++ b/beacon_node/network/src/sync/block_lookups/parent_chain.rs @@ -118,7 +118,8 @@ pub(crate) fn find_oldest_fork_ancestor( #[cfg(test)] mod tests { use super::{Node, compute_parent_chains, find_oldest_fork_ancestor}; - use types::{FixedBytesExtended, Hash256}; + use fixed_bytes::FixedBytesExtended; + use types::Hash256; fn h(n: u64) -> Hash256 { Hash256::from_low_u64_be(n) diff --git a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs index 8fb3248a871..46897b2283b 100644 --- a/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs +++ b/beacon_node/network/src/sync/block_lookups/single_block_lookup.rs @@ -5,7 +5,7 @@ use crate::sync::network_context::{ 
SyncNetworkContext, }; use beacon_chain::{BeaconChainTypes, BlockProcessStatus}; -use derivative::Derivative; +use educe::Educe; use lighthouse_network::service::api_types::Id; use lighthouse_tracing::SPAN_SINGLE_BLOCK_LOOKUP; use parking_lot::RwLock; @@ -57,8 +57,8 @@ pub enum LookupRequestError { }, } -#[derive(Derivative)] -#[derivative(Debug(bound = "T: BeaconChainTypes"))] +#[derive(Educe)] +#[educe(Debug(bound(T: BeaconChainTypes)))] pub struct SingleBlockLookup { pub id: Id, pub block_request_state: BlockRequestState, @@ -67,7 +67,7 @@ pub struct SingleBlockLookup { /// the custody request to have an updated view of the peers that claim to have imported the /// block associated with this lookup. The peer set of a lookup can change rapidly, and faster /// than the lifetime of a custody request. - #[derivative(Debug(format_with = "fmt_peer_set_as_len"))] + #[educe(Debug(method(fmt_peer_set_as_len)))] peers: Arc>>, block_root: Hash256, awaiting_parent: Option, @@ -369,10 +369,10 @@ impl SingleBlockLookup { } /// The state of the blob request component of a `SingleBlockLookup`. -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct BlobRequestState { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] pub block_root: Hash256, pub state: SingleLookupRequestState>, } @@ -387,10 +387,10 @@ impl BlobRequestState { } /// The state of the custody request component of a `SingleBlockLookup`. -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct CustodyRequestState { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] pub block_root: Hash256, pub state: SingleLookupRequestState>, } @@ -405,10 +405,10 @@ impl CustodyRequestState { } /// The state of the block request component of a `SingleBlockLookup`. 
-#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct BlockRequestState { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] pub requested_block_root: Hash256, pub state: SingleLookupRequestState>>, } diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index d5858c23f11..ed9a11a03de 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -7,11 +7,12 @@ use lighthouse_network::{ BlobsByRangeRequestId, BlocksByRangeRequestId, DataColumnsByRangeRequestId, }, }; +use ssz_types::RuntimeVariableList; use std::{collections::HashMap, sync::Arc}; use tracing::{Span, debug}; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, - Hash256, RuntimeVariableList, SignedBeaconBlock, + Hash256, SignedBeaconBlock, }; use crate::sync::network_context::MAX_COLUMN_RETRIES; @@ -517,11 +518,10 @@ mod tests { #[test] fn no_blobs_into_responses() { - let spec = test_spec::(); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { - generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec) + generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng) .0 .into() }) @@ -540,19 +540,13 @@ mod tests { #[test] fn empty_blobs_into_responses() { - let spec = test_spec::(); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { // Always generate some blobs. 
- generate_rand_block_and_blobs::( - ForkName::Deneb, - NumBlobs::Number(3), - &mut rng, - &spec, - ) - .0 - .into() + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Number(3), &mut rng) + .0 + .into() }) .collect::>>>(); diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index 014d728ffe4..4ce10e23ca1 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -1331,7 +1331,7 @@ impl SyncingChain { .get(&(self.processing_target + batch_index as u64 * EPOCHS_PER_BATCH)) { visualization_string.push(batch.visualize()); - if batch_index != BATCH_BUFFER_SIZE { + if batch_index < BATCH_BUFFER_SIZE - 1 { // Add a comma in between elements visualization_string.push(','); } diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index fc641861754..ef52f896785 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -194,7 +194,7 @@ impl TestRig { ) -> (SignedBeaconBlock, Vec>) { let fork_name = self.fork_name; let rng = &mut self.rng; - generate_rand_block_and_blobs::(fork_name, num_blobs, rng, &self.spec) + generate_rand_block_and_blobs::(fork_name, num_blobs, rng) } fn rand_block_and_data_columns( @@ -1146,10 +1146,8 @@ impl TestRig { #[test] fn stable_rng() { - let spec = types::MainnetEthSpec::default_spec(); let mut rng = XorShiftRng::from_seed([42; 16]); - let (block, _) = - generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec); + let (block, _) = generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng); assert_eq!( block.canonical_root(), Hash256::from_slice( @@ -1931,8 +1929,8 @@ mod deneb_only { block_verification_types::{AsBlock, RpcBlock}, data_availability_checker::AvailabilityCheckError, }; + use ssz_types::RuntimeVariableList; use std::collections::VecDeque; - use 
types::RuntimeVariableList; struct DenebTester { rig: TestRig, diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index beaf8188824..6fab7a752a4 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -9,9 +9,11 @@ portable = ["beacon_chain/portable"] [dependencies] bitvec = { workspace = true } -derivative = { workspace = true } +bls = { workspace = true } +educe = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } itertools = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } @@ -20,6 +22,8 @@ rayon = { workspace = true } serde = { workspace = true } state_processing = { workspace = true } store = { workspace = true } +superstruct = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index f28d8f278a0..897a7e5eccc 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -1,12 +1,13 @@ use crate::attestation_storage::{CompactAttestationRef, CompactIndexedAttestation}; use crate::max_cover::MaxCover; use crate::reward_cache::RewardCache; +use ssz::BitList; use state_processing::common::{ attesting_indices_base::get_attesting_indices, base, get_attestation_participation_flag_indices, }; use std::collections::HashMap; use types::{ - Attestation, BeaconState, BitList, ChainSpec, EthSpec, + Attestation, BeaconState, ChainSpec, EthSpec, beacon_state::BeaconStateBase, consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, }; diff --git a/beacon_node/operation_pool/src/attestation_storage.rs b/beacon_node/operation_pool/src/attestation_storage.rs index 4f1b8b81fe4..9094c9cd4d4 100644 --- 
a/beacon_node/operation_pool/src/attestation_storage.rs +++ b/beacon_node/operation_pool/src/attestation_storage.rs @@ -1,11 +1,13 @@ use crate::AttestationStats; +use bls::AggregateSignature; use itertools::Itertools; +use ssz::{BitList, BitVector}; use std::collections::{BTreeMap, HashMap, HashSet}; +use superstruct::superstruct; +use typenum::Unsigned; use types::{ - AggregateSignature, Attestation, AttestationData, BeaconState, BitList, BitVector, Checkpoint, - Epoch, EthSpec, Hash256, Slot, Unsigned, + Attestation, AttestationData, BeaconState, Checkpoint, Epoch, EthSpec, Hash256, Slot, attestation::{AttestationBase, AttestationElectra}, - superstruct, }; #[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index cc8809c43e6..485f21b5c8b 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -19,7 +19,7 @@ pub enum ReceivedPreCapella { /// /// Using the LIFO queue for block production disincentivises spam on P2P at the Capella fork, /// and is less-relevant after that. -#[derive(Debug, Default)] +#[derive(Debug, Default, PartialEq, Eq)] pub struct BlsToExecutionChanges { /// Map from validator index to BLS to execution change. 
by_validator_index: HashMap>>, diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 24e2cfbbb5d..00361450a5b 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -35,12 +35,12 @@ use state_processing::{SigVerifiedOp, VerifyOperation}; use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::marker::PhantomData; use std::ptr; +use typenum::Unsigned; use types::{ AbstractExecPayload, Attestation, AttestationData, AttesterSlashing, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ProposerSlashing, SignedBeaconBlock, SignedBlsToExecutionChange, SignedVoluntaryExit, Slot, SyncAggregate, SyncCommitteeContribution, Validator, sync_aggregate::Error as SyncAggregateError, - typenum::Unsigned, }; type SyncContributions = RwLock>>>; @@ -782,6 +782,7 @@ impl PartialEq for OperationPool { && *self.attester_slashings.read() == *other.attester_slashings.read() && *self.proposer_slashings.read() == *other.proposer_slashings.read() && *self.voluntary_exits.read() == *other.voluntary_exits.read() + && *self.bls_to_execution_changes.read() == *other.bls_to_execution_changes.read() } } @@ -792,6 +793,8 @@ mod release_tests { use beacon_chain::test_utils::{ BeaconChainHarness, EphemeralHarnessType, RelativeSyncCommittee, test_spec, }; + use bls::Keypair; + use fixed_bytes::FixedBytesExtended; use maplit::hashset; use state_processing::epoch_cache::initialize_epoch_cache; use state_processing::{VerifyOperation, common::get_attesting_indices_from_state}; diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 4d754534605..241b5fec53c 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -3,7 +3,7 @@ use crate::OperationPool; use crate::attestation_storage::AttestationMap; use crate::bls_to_execution_changes::{BlsToExecutionChanges, ReceivedPreCapella}; use 
crate::sync_aggregate_id::SyncAggregateId; -use derivative::Derivative; +use educe::Educe; use parking_lot::RwLock; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -11,6 +11,7 @@ use state_processing::SigVerifiedOp; use std::collections::HashSet; use std::mem; use store::{DBColumn, Error as StoreError, StoreItem}; +use superstruct::superstruct; use types::attestation::AttestationOnDisk; use types::*; @@ -22,10 +23,7 @@ type PersistedSyncContributions = Vec<(SyncAggregateId, Vec( let is_semi_supernode = parse_flag(cli_args, "semi-supernode"); client_config.chain.node_custody_type = if is_supernode { - client_config.network.subscribe_all_data_column_subnets = true; NodeCustodyType::Supernode } else if is_semi_supernode { NodeCustodyType::SemiSupernode diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index 61a8474a731..50028fe73ff 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -15,11 +15,13 @@ db-key = "0.0.5" directory = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } itertools = { workspace = true } leveldb = { version = "0.8.6", optional = true, default-features = false } logging = { workspace = true } lru = { workspace = true } metrics = { workspace = true } +milhouse = { workspace = true } parking_lot = { workspace = true } redb = { version = "2.1.3", optional = true } safe_arith = { workspace = true } @@ -31,6 +33,7 @@ strum = { workspace = true } superstruct = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } xdelta3 = { workspace = true } zstd = { workspace = true } diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs deleted file mode 100644 index 72e5d9c7af0..00000000000 --- a/beacon_node/store/src/chunked_iter.rs +++ /dev/null @@ -1,120 +0,0 @@ -use 
crate::chunked_vector::{Chunk, Field, chunk_key}; -use crate::{HotColdDB, ItemStore}; -use tracing::error; -use types::{ChainSpec, EthSpec, Slot}; - -/// Iterator over the values of a `BeaconState` vector field (like `block_roots`). -/// -/// Uses the freezer DB's separate table to load the values. -pub struct ChunkedVectorIter<'a, F, E, Hot, Cold> -where - F: Field, - E: EthSpec, - Hot: ItemStore, - Cold: ItemStore, -{ - pub(crate) store: &'a HotColdDB, - current_vindex: usize, - pub(crate) end_vindex: usize, - next_cindex: usize, - current_chunk: Chunk, -} - -impl<'a, F, E, Hot, Cold> ChunkedVectorIter<'a, F, E, Hot, Cold> -where - F: Field, - E: EthSpec, - Hot: ItemStore, - Cold: ItemStore, -{ - /// Create a new iterator which can yield elements from `start_vindex` up to the last - /// index stored by the restore point at `last_restore_point_slot`. - /// - /// The `freezer_upper_limit` slot should be the slot of a recent restore point as obtained from - /// `Root::freezer_upper_limit`. We pass it as a parameter so that the caller can - /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`). - pub fn new( - store: &'a HotColdDB, - start_vindex: usize, - freezer_upper_limit: Slot, - spec: &ChainSpec, - ) -> Self { - let (_, end_vindex) = F::start_and_end_vindex(freezer_upper_limit, spec); - - // Set the next chunk to the one containing `start_vindex`. - let next_cindex = start_vindex / F::chunk_size(); - // Set the current chunk to the empty chunk, it will never be read. - let current_chunk = Chunk::default(); - - Self { - store, - current_vindex: start_vindex, - end_vindex, - next_cindex, - current_chunk, - } - } -} - -impl Iterator for ChunkedVectorIter<'_, F, E, Hot, Cold> -where - F: Field, - E: EthSpec, - Hot: ItemStore, - Cold: ItemStore, -{ - type Item = (usize, F::Value); - - fn next(&mut self) -> Option { - let chunk_size = F::chunk_size(); - - // Range exhausted, return `None` forever. 
- if self.current_vindex >= self.end_vindex { - None - } - // Value lies in the current chunk, return it. - else if self.current_vindex < self.next_cindex * chunk_size { - let vindex = self.current_vindex; - let val = self - .current_chunk - .values - .get(vindex % chunk_size) - .cloned() - .or_else(|| { - error!( - vector_index = vindex, - "Missing chunk value in forwards iterator" - ); - None - })?; - self.current_vindex += 1; - Some((vindex, val)) - } - // Need to load the next chunk, load it and recurse back into the in-range case. - else { - self.current_chunk = Chunk::load( - &self.store.cold_db, - F::column(), - &chunk_key(self.next_cindex), - ) - .map_err(|e| { - error!( - chunk_index = self.next_cindex, - error = ?e, - "Database error in forwards iterator" - ); - e - }) - .ok()? - .or_else(|| { - error!( - chunk_index = self.next_cindex, - "Missing chunk in forwards iterator" - ); - None - })?; - self.next_cindex += 1; - self.next() - } - } -} diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs deleted file mode 100644 index ee043c14f4e..00000000000 --- a/beacon_node/store/src/chunked_vector.rs +++ /dev/null @@ -1,919 +0,0 @@ -//! Space-efficient storage for `BeaconState` vector fields. -//! -//! This module provides logic for splitting the `Vector` fields of a `BeaconState` into -//! chunks, and storing those chunks in contiguous ranges in the on-disk database. The motiviation -//! for doing this is avoiding massive duplication in every on-disk state. For example, rather than -//! storing the whole `historical_roots` vector, which is updated once every couple of thousand -//! slots, at every slot, we instead store all the historical values as a chunked vector on-disk, -//! and fetch only the slice we need when reconstructing the `historical_roots` of a state. -//! -//! ## Terminology -//! -//! * **Chunk size**: the number of vector values stored per on-disk chunk. -//! 
* **Vector index** (vindex): index into all the historical values, identifying a single element -//! of the vector being stored. -//! * **Chunk index** (cindex): index into the keyspace of the on-disk database, identifying a chunk -//! of elements. To find the chunk index of a vector index: `cindex = vindex / chunk_size`. -use self::UpdatePattern::*; -use crate::*; -use ssz::{Decode, Encode}; -use types::historical_summary::HistoricalSummary; - -/// Description of how a `BeaconState` field is updated during state processing. -/// -/// When storing a state, this allows us to efficiently store only those entries -/// which are not present in the DB already. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum UpdatePattern { - /// The value is updated once per `n` slots. - OncePerNSlots { - n: u64, - /// The slot at which the field begins to accumulate values. - /// - /// The field should not be read or written until `activation_slot` is reached, and the - /// activation slot should act as an offset when converting slots to vector indices. - activation_slot: Option, - /// The slot at which the field ceases to accumulate values. - /// - /// If this is `None` then the field is continually updated. - deactivation_slot: Option, - }, - /// The value is updated once per epoch, for the epoch `current_epoch - lag`. - OncePerEpoch { lag: u64 }, -} - -/// Map a chunk index to bytes that can be used to key the NoSQL database. -/// -/// We shift chunks up by 1 to make room for a genesis chunk that is handled separately. -pub fn chunk_key(cindex: usize) -> [u8; 8] { - (cindex as u64 + 1).to_be_bytes() -} - -/// Return the database key for the genesis value. -fn genesis_value_key() -> [u8; 8] { - 0u64.to_be_bytes() -} - -/// Trait for types representing fields of the `BeaconState`. -/// -/// All of the required methods are type-level, because we do most things with fields at the -/// type-level. 
We require their value-level witnesses to be `Copy` so that we can avoid the -/// turbofish when calling functions like `store_updated_vector`. -pub trait Field: Copy { - /// The type of value stored in this field: the `T` from `Vector`. - /// - /// The `Default` impl will be used to fill extra vector entries. - type Value: Default + std::fmt::Debug + milhouse::Value; - // Decode + Encode + Default + Clone + PartialEq + std::fmt::Debug - - /// The length of this field: the `N` from `Vector`. - type Length: Unsigned; - - /// The database column where the integer-indexed chunks for this field should be stored. - /// - /// Each field's column **must** be unique. - fn column() -> DBColumn; - - /// Update pattern for this field, so that we can do differential updates. - fn update_pattern(spec: &ChainSpec) -> UpdatePattern; - - /// The number of values to store per chunk on disk. - /// - /// Default is 128 so that we read/write 4K pages when the values are 32 bytes. - // TODO: benchmark and optimise this parameter - fn chunk_size() -> usize { - 128 - } - - /// Convert a v-index (vector index) to a chunk index. - fn chunk_index(vindex: usize) -> usize { - vindex / Self::chunk_size() - } - - /// Get the value of this field at the given vector index, from the state. - fn get_value( - state: &BeaconState, - vindex: u64, - spec: &ChainSpec, - ) -> Result; - - /// True if this is a `FixedLengthField`, false otherwise. - fn is_fixed_length() -> bool; - - /// Compute the start and end vector indices of the slice of history required at `current_slot`. - /// - /// ## Example - /// - /// If we have a field that is updated once per epoch, then the end vindex will be - /// `current_epoch + 1`, because we want to include the value for the current epoch, and the - /// start vindex will be `end_vindex - Self::Length`, because that's how far back we can look. 
- fn start_and_end_vindex(current_slot: Slot, spec: &ChainSpec) -> (usize, usize) { - // We take advantage of saturating subtraction on slots and epochs - match Self::update_pattern(spec) { - OncePerNSlots { - n, - activation_slot, - deactivation_slot, - } => { - // Per-slot changes exclude the index for the current slot, because - // it won't be set until the slot completes (think of `state_roots`, `block_roots`). - // This also works for the `historical_roots` because at the `n`th slot, the 0th - // entry of the list is created, and before that the list is empty. - // - // To account for the switch from historical roots to historical summaries at - // Capella we also modify the current slot by the activation and deactivation slots. - // The activation slot acts as an offset (subtraction) while the deactivation slot - // acts as a clamp (min). - let slot_with_clamp = deactivation_slot.map_or(current_slot, |deactivation_slot| { - std::cmp::min(current_slot, deactivation_slot) - }); - let slot_with_clamp_and_offset = if let Some(activation_slot) = activation_slot { - slot_with_clamp - activation_slot - } else { - // Return (0, 0) to indicate that the field should not be read/written. - return (0, 0); - }; - let end_vindex = slot_with_clamp_and_offset / n; - let start_vindex = end_vindex - Self::Length::to_u64(); - (start_vindex.as_usize(), end_vindex.as_usize()) - } - OncePerEpoch { lag } => { - // Per-epoch changes include the index for the current epoch, because it - // will have been set at the most recent epoch boundary. - let current_epoch = current_slot.epoch(E::slots_per_epoch()); - let end_epoch = current_epoch + 1 - lag; - let start_epoch = end_epoch + lag - Self::Length::to_u64(); - (start_epoch.as_usize(), end_epoch.as_usize()) - } - } - } - - /// Given an `existing_chunk` stored in the DB, construct an updated chunk to replace it. 
- fn get_updated_chunk( - existing_chunk: &Chunk, - chunk_index: usize, - start_vindex: usize, - end_vindex: usize, - state: &BeaconState, - spec: &ChainSpec, - ) -> Result, Error> { - let chunk_size = Self::chunk_size(); - let mut new_chunk = Chunk::new(vec![Self::Value::default(); chunk_size]); - - for i in 0..chunk_size { - let vindex = chunk_index * chunk_size + i; - if vindex >= start_vindex && vindex < end_vindex { - let vector_value = Self::get_value(state, vindex as u64, spec)?; - - if let Some(existing_value) = existing_chunk.values.get(i) - && *existing_value != vector_value - && *existing_value != Self::Value::default() - { - return Err(ChunkError::Inconsistent { - field: Self::column(), - chunk_index, - existing_value: format!("{:?}", existing_value), - new_value: format!("{:?}", vector_value), - } - .into()); - } - - new_chunk.values[i] = vector_value; - } else { - new_chunk.values[i] = existing_chunk.values.get(i).cloned().unwrap_or_default(); - } - } - - Ok(new_chunk) - } - - /// Determine whether a state at `slot` possesses (or requires) the genesis value. - fn slot_needs_genesis_value(slot: Slot, spec: &ChainSpec) -> bool { - let (_, end_vindex) = Self::start_and_end_vindex(slot, spec); - match Self::update_pattern(spec) { - // If the end_vindex is less than the length of the vector, then the vector - // has not yet been completely filled with non-genesis values, and so the genesis - // value is still required. - OncePerNSlots { .. } => { - Self::is_fixed_length() && end_vindex < Self::Length::to_usize() - } - // If the field has lag, then it takes an extra `lag` vindices beyond the - // `end_vindex` before the vector has been filled with non-genesis values. - OncePerEpoch { lag } => { - Self::is_fixed_length() && end_vindex + (lag as usize) < Self::Length::to_usize() - } - } - } - - /// Load the genesis value for a fixed length field from the store. - /// - /// This genesis value should be used to fill the initial state of the vector. 
- fn load_genesis_value>(store: &S) -> Result { - let key = &genesis_value_key()[..]; - let chunk = - Chunk::load(store, Self::column(), key)?.ok_or(ChunkError::MissingGenesisValue)?; - chunk - .values - .first() - .cloned() - .ok_or_else(|| ChunkError::MissingGenesisValue.into()) - } - - /// Store the given `value` as the genesis value for this field, unless stored already. - /// - /// Check the existing value (if any) for consistency with the value we intend to store, and - /// return an error if they are inconsistent. - fn check_and_store_genesis_value>( - store: &S, - value: Self::Value, - ops: &mut Vec, - ) -> Result<(), Error> { - let key = &genesis_value_key()[..]; - - if let Some(existing_chunk) = Chunk::::load(store, Self::column(), key)? { - if existing_chunk.values.len() != 1 { - Err(ChunkError::InvalidGenesisChunk { - field: Self::column(), - expected_len: 1, - observed_len: existing_chunk.values.len(), - } - .into()) - } else if existing_chunk.values[0] != value { - Err(ChunkError::InconsistentGenesisValue { - field: Self::column(), - existing_value: format!("{:?}", existing_chunk.values[0]), - new_value: format!("{:?}", value), - } - .into()) - } else { - Ok(()) - } - } else { - let chunk = Chunk::new(vec![value]); - chunk.store(Self::column(), &genesis_value_key()[..], ops)?; - Ok(()) - } - } - - /// Extract the genesis value for a fixed length field from an - /// - /// Will only return a correct value if `slot_needs_genesis_value(state.slot(), spec) == true`. - fn extract_genesis_value( - state: &BeaconState, - spec: &ChainSpec, - ) -> Result { - let (_, end_vindex) = Self::start_and_end_vindex(state.slot(), spec); - match Self::update_pattern(spec) { - // Genesis value is guaranteed to exist at `end_vindex`, as it won't yet have been - // updated - OncePerNSlots { .. } => Ok(Self::get_value(state, end_vindex as u64, spec)?), - // If there's lag, the value of the field at the vindex *without the lag* - // should still be set to the genesis value. 
- OncePerEpoch { lag } => Ok(Self::get_value(state, end_vindex as u64 + lag, spec)?), - } - } -} - -/// Marker trait for fixed-length fields (`Vector`). -pub trait FixedLengthField: Field {} - -/// Marker trait for variable-length fields (`List`). -pub trait VariableLengthField: Field {} - -/// Macro to implement the `Field` trait on a new unit struct type. -macro_rules! field { - ($struct_name:ident, $marker_trait:ident, $value_ty:ty, $length_ty:ty, $column:expr, - $update_pattern:expr, $get_value:expr) => { - #[derive(Clone, Copy)] - pub struct $struct_name; - - impl Field for $struct_name - where - E: EthSpec, - { - type Value = $value_ty; - type Length = $length_ty; - - fn column() -> DBColumn { - $column - } - - fn update_pattern(spec: &ChainSpec) -> UpdatePattern { - let update_pattern = $update_pattern; - update_pattern(spec) - } - - fn get_value( - state: &BeaconState, - vindex: u64, - spec: &ChainSpec, - ) -> Result { - let get_value = $get_value; - get_value(state, vindex, spec) - } - - fn is_fixed_length() -> bool { - stringify!($marker_trait) == "FixedLengthField" - } - } - - impl $marker_trait for $struct_name {} - }; -} - -field!( - BlockRootsChunked, - FixedLengthField, - Hash256, - E::SlotsPerHistoricalRoot, - DBColumn::BeaconBlockRootsChunked, - |_| OncePerNSlots { - n: 1, - activation_slot: Some(Slot::new(0)), - deactivation_slot: None - }, - |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.block_roots(), index) -); - -field!( - StateRootsChunked, - FixedLengthField, - Hash256, - E::SlotsPerHistoricalRoot, - DBColumn::BeaconStateRootsChunked, - |_| OncePerNSlots { - n: 1, - activation_slot: Some(Slot::new(0)), - deactivation_slot: None, - }, - |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.state_roots(), index) -); - -field!( - HistoricalRoots, - VariableLengthField, - Hash256, - E::HistoricalRootsLimit, - DBColumn::BeaconHistoricalRoots, - |spec: &ChainSpec| OncePerNSlots { - n: 
E::SlotsPerHistoricalRoot::to_u64(), - activation_slot: Some(Slot::new(0)), - deactivation_slot: spec - .capella_fork_epoch - .map(|fork_epoch| fork_epoch.start_slot(E::slots_per_epoch())), - }, - |state: &BeaconState<_>, index, _| safe_modulo_list_index(state.historical_roots(), index) -); - -field!( - RandaoMixes, - FixedLengthField, - Hash256, - E::EpochsPerHistoricalVector, - DBColumn::BeaconRandaoMixes, - |_| OncePerEpoch { lag: 1 }, - |state: &BeaconState<_>, index, _| safe_modulo_vector_index(state.randao_mixes(), index) -); - -field!( - HistoricalSummaries, - VariableLengthField, - HistoricalSummary, - E::HistoricalRootsLimit, - DBColumn::BeaconHistoricalSummaries, - |spec: &ChainSpec| OncePerNSlots { - n: E::SlotsPerHistoricalRoot::to_u64(), - activation_slot: spec - .capella_fork_epoch - .map(|fork_epoch| fork_epoch.start_slot(E::slots_per_epoch())), - deactivation_slot: None, - }, - |state: &BeaconState<_>, index, _| safe_modulo_list_index( - state - .historical_summaries() - .map_err(|_| ChunkError::InvalidFork)?, - index - ) -); - -pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( - field: F, - store: &S, - state: &BeaconState, - spec: &ChainSpec, - ops: &mut Vec, -) -> Result<(), Error> { - let chunk_size = F::chunk_size(); - let (start_vindex, end_vindex) = F::start_and_end_vindex(state.slot(), spec); - let start_cindex = start_vindex / chunk_size; - let end_cindex = end_vindex / chunk_size; - - // Store the genesis value if we have access to it, and it hasn't been stored already. - if F::slot_needs_genesis_value(state.slot(), spec) { - let genesis_value = F::extract_genesis_value(state, spec)?; - F::check_and_store_genesis_value(store, genesis_value, ops)?; - } - - // Start by iterating backwards from the last chunk, storing new chunks in the database. - // Stop once a chunk in the database matches what we were about to store, this indicates - // that a previously stored state has already filled-in a portion of the indices covered. 
- let full_range_checked = store_range( - field, - (start_cindex..=end_cindex).rev(), - start_vindex, - end_vindex, - store, - state, - spec, - ops, - )?; - - // If the previous `store_range` did not check the entire range, it may be the case that the - // state's vector includes elements at low vector indices that are not yet stored in the - // database, so run another `store_range` to ensure these values are also stored. - if !full_range_checked { - store_range( - field, - start_cindex..end_cindex, - start_vindex, - end_vindex, - store, - state, - spec, - ops, - )?; - } - - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -fn store_range( - _: F, - range: I, - start_vindex: usize, - end_vindex: usize, - store: &S, - state: &BeaconState, - spec: &ChainSpec, - ops: &mut Vec, -) -> Result -where - F: Field, - E: EthSpec, - S: KeyValueStore, - I: Iterator, -{ - for chunk_index in range { - let chunk_key = &chunk_key(chunk_index)[..]; - - let existing_chunk = - Chunk::::load(store, F::column(), chunk_key)?.unwrap_or_default(); - - let new_chunk = F::get_updated_chunk( - &existing_chunk, - chunk_index, - start_vindex, - end_vindex, - state, - spec, - )?; - - if new_chunk == existing_chunk { - return Ok(false); - } - - new_chunk.store(F::column(), chunk_key, ops)?; - } - - Ok(true) -} - -// Chunks at the end index are included. -// TODO: could be more efficient with a real range query (perhaps RocksDB) -fn range_query, E: EthSpec, T: Decode + Encode>( - store: &S, - column: DBColumn, - start_index: usize, - end_index: usize, -) -> Result>, Error> { - let range = start_index..=end_index; - let len = range - .end() - // Add one to account for inclusive range. 
- .saturating_add(1) - .saturating_sub(*range.start()); - let mut result = Vec::with_capacity(len); - - for chunk_index in range { - let key = &chunk_key(chunk_index)[..]; - let chunk = Chunk::load(store, column, key)?.ok_or(ChunkError::Missing { chunk_index })?; - result.push(chunk); - } - - Ok(result) -} - -/// Combine chunks to form a list or vector of all values with vindex in `start_vindex..end_vindex`. -/// -/// The `length` parameter is the length of the vec to construct, with entries set to `default` if -/// they lie outside the vindex range. -fn stitch( - chunks: Vec>, - start_vindex: usize, - end_vindex: usize, - chunk_size: usize, - length: usize, - default: T, -) -> Result, ChunkError> { - if start_vindex + length < end_vindex { - return Err(ChunkError::OversizedRange { - start_vindex, - end_vindex, - length, - }); - } - - let start_cindex = start_vindex / chunk_size; - let end_cindex = end_vindex / chunk_size; - - let mut result = vec![default; length]; - - for (chunk_index, chunk) in (start_cindex..=end_cindex).zip(chunks.into_iter()) { - // All chunks but the last chunk must be full-sized - if chunk_index != end_cindex && chunk.values.len() != chunk_size { - return Err(ChunkError::InvalidSize { - chunk_index, - expected: chunk_size, - actual: chunk.values.len(), - }); - } - - // Copy the chunk entries into the result vector - for (i, value) in chunk.values.into_iter().enumerate() { - let vindex = chunk_index * chunk_size + i; - - if vindex >= start_vindex && vindex < end_vindex { - result[vindex % length] = value; - } - } - } - - Ok(result) -} - -pub fn load_vector_from_db, E: EthSpec, S: KeyValueStore>( - store: &S, - slot: Slot, - spec: &ChainSpec, -) -> Result, Error> { - // Do a range query - let chunk_size = F::chunk_size(); - let (start_vindex, end_vindex) = F::start_and_end_vindex(slot, spec); - let start_cindex = start_vindex / chunk_size; - let end_cindex = end_vindex / chunk_size; - - let chunks = range_query(store, F::column(), 
start_cindex, end_cindex)?; - - let default = if F::slot_needs_genesis_value(slot, spec) { - F::load_genesis_value(store)? - } else { - F::Value::default() - }; - - let result = stitch( - chunks, - start_vindex, - end_vindex, - chunk_size, - F::Length::to_usize(), - default, - )?; - - Ok(Vector::new(result).map_err(ChunkError::Milhouse)?) -} - -/// The historical roots are stored in vector chunks, despite not actually being a vector. -pub fn load_variable_list_from_db, E: EthSpec, S: KeyValueStore>( - store: &S, - slot: Slot, - spec: &ChainSpec, -) -> Result, Error> { - let chunk_size = F::chunk_size(); - let (start_vindex, end_vindex) = F::start_and_end_vindex(slot, spec); - let start_cindex = start_vindex / chunk_size; - let end_cindex = end_vindex / chunk_size; - - let chunks: Vec> = range_query(store, F::column(), start_cindex, end_cindex)?; - - let mut result = Vec::with_capacity(chunk_size * chunks.len()); - - for (chunk_index, chunk) in chunks.into_iter().enumerate() { - for (i, value) in chunk.values.into_iter().enumerate() { - let vindex = chunk_index * chunk_size + i; - - if vindex >= start_vindex && vindex < end_vindex { - result.push(value); - } - } - } - - Ok(List::new(result).map_err(ChunkError::Milhouse)?) -} - -/// Index into a `List` field of the state, avoiding out of bounds and division by 0. -fn safe_modulo_list_index( - values: &List, - index: u64, -) -> Result { - if values.is_empty() { - Err(ChunkError::ZeroLengthList) - } else { - values - .get(index as usize % values.len()) - .copied() - .ok_or(ChunkError::IndexOutOfBounds { index }) - } -} - -fn safe_modulo_vector_index( - values: &Vector, - index: u64, -) -> Result { - if values.is_empty() { - Err(ChunkError::ZeroLengthVector) - } else { - values - .get(index as usize % values.len()) - .copied() - .ok_or(ChunkError::IndexOutOfBounds { index }) - } -} - -/// A chunk of a fixed-size vector from the `BeaconState`, stored in the database. 
-#[derive(Debug, Clone, PartialEq)] -pub struct Chunk { - /// A vector of up-to `chunk_size` values. - pub values: Vec, -} - -impl Default for Chunk -where - T: Decode + Encode, -{ - fn default() -> Self { - Chunk { values: vec![] } - } -} - -impl Chunk -where - T: Decode + Encode, -{ - pub fn new(values: Vec) -> Self { - Chunk { values } - } - - pub fn load, E: EthSpec>( - store: &S, - column: DBColumn, - key: &[u8], - ) -> Result, Error> { - store - .get_bytes(column, key)? - .map(|bytes| Self::decode(&bytes)) - .transpose() - } - - pub fn store( - &self, - column: DBColumn, - key: &[u8], - ops: &mut Vec, - ) -> Result<(), Error> { - ops.push(KeyValueStoreOp::PutKeyValue( - column, - key.to_vec(), - self.encode()?, - )); - Ok(()) - } - - /// Attempt to decode a single chunk. - pub fn decode(bytes: &[u8]) -> Result { - if !::is_ssz_fixed_len() { - return Err(Error::from(ChunkError::InvalidType)); - } - - let value_size = ::ssz_fixed_len(); - - if value_size == 0 { - return Err(Error::from(ChunkError::InvalidType)); - } - - let values = bytes - .chunks(value_size) - .map(T::from_ssz_bytes) - .collect::>()?; - - Ok(Chunk { values }) - } - - pub fn encoded_size(&self) -> usize { - self.values.len() * ::ssz_fixed_len() - } - - /// Encode a single chunk as bytes. 
- pub fn encode(&self) -> Result, Error> { - if !::is_ssz_fixed_len() { - return Err(Error::from(ChunkError::InvalidType)); - } - - Ok(self.values.iter().flat_map(T::as_ssz_bytes).collect()) - } -} - -#[derive(Debug, PartialEq)] -pub enum ChunkError { - ZeroLengthVector, - ZeroLengthList, - IndexOutOfBounds { - index: u64, - }, - InvalidSize { - chunk_index: usize, - expected: usize, - actual: usize, - }, - Missing { - chunk_index: usize, - }, - MissingGenesisValue, - Inconsistent { - field: DBColumn, - chunk_index: usize, - existing_value: String, - new_value: String, - }, - InconsistentGenesisValue { - field: DBColumn, - existing_value: String, - new_value: String, - }, - InvalidGenesisChunk { - field: DBColumn, - expected_len: usize, - observed_len: usize, - }, - InvalidType, - OversizedRange { - start_vindex: usize, - end_vindex: usize, - length: usize, - }, - InvalidFork, - Milhouse(milhouse::Error), -} - -impl From for ChunkError { - fn from(e: milhouse::Error) -> ChunkError { - Self::Milhouse(e) - } -} - -#[cfg(test)] -mod test { - use super::*; - use types::MainnetEthSpec as TestSpec; - use types::*; - - fn v(i: u64) -> Hash256 { - Hash256::from_low_u64_be(i) - } - - #[test] - fn stitch_default() { - let chunk_size = 4; - - let chunks = vec![ - Chunk::new(vec![0u64, 1, 2, 3]), - Chunk::new(vec![4, 5, 0, 0]), - ]; - - assert_eq!( - stitch(chunks, 2, 6, chunk_size, 12, 99).unwrap(), - vec![99, 99, 2, 3, 4, 5, 99, 99, 99, 99, 99, 99] - ); - } - - #[test] - fn stitch_basic() { - let chunk_size = 4; - let default = v(0); - - let chunks = vec![ - Chunk::new(vec![v(0), v(1), v(2), v(3)]), - Chunk::new(vec![v(4), v(5), v(6), v(7)]), - Chunk::new(vec![v(8), v(9), v(10), v(11)]), - ]; - - assert_eq!( - stitch(chunks.clone(), 0, 12, chunk_size, 12, default).unwrap(), - (0..12).map(v).collect::>() - ); - - assert_eq!( - stitch(chunks, 2, 10, chunk_size, 8, default).unwrap(), - vec![v(8), v(9), v(2), v(3), v(4), v(5), v(6), v(7)] - ); - } - - #[test] - fn 
stitch_oversized_range() { - let chunk_size = 4; - let default = 0; - - let chunks = vec![Chunk::new(vec![20u64, 21, 22, 23])]; - - // Args (start_vindex, end_vindex, length) - let args = vec![(0, 21, 20), (0, 2048, 1024), (0, 2, 1)]; - - for (start_vindex, end_vindex, length) in args { - assert_eq!( - stitch( - chunks.clone(), - start_vindex, - end_vindex, - chunk_size, - length, - default - ), - Err(ChunkError::OversizedRange { - start_vindex, - end_vindex, - length, - }) - ); - } - } - - #[test] - fn fixed_length_fields() { - fn test_fixed_length>(_: F, expected: bool) { - assert_eq!(F::is_fixed_length(), expected); - } - test_fixed_length(BlockRootsChunked, true); - test_fixed_length(StateRootsChunked, true); - test_fixed_length(HistoricalRoots, false); - test_fixed_length(RandaoMixes, true); - } - - fn needs_genesis_value_once_per_slot>(_: F) { - let spec = &TestSpec::default_spec(); - let max = F::Length::to_u64(); - for i in 0..max { - assert!( - F::slot_needs_genesis_value(Slot::new(i), spec), - "slot {}", - i - ); - } - assert!(!F::slot_needs_genesis_value(Slot::new(max), spec)); - } - - #[test] - fn needs_genesis_value_block_roots() { - needs_genesis_value_once_per_slot(BlockRootsChunked); - } - - #[test] - fn needs_genesis_value_state_roots() { - needs_genesis_value_once_per_slot(StateRootsChunked); - } - - #[test] - fn needs_genesis_value_historical_roots() { - let spec = &TestSpec::default_spec(); - assert!( - !>::slot_needs_genesis_value(Slot::new(0), spec) - ); - } - - fn needs_genesis_value_test_randao>(_: F) { - let spec = &TestSpec::default_spec(); - let max = TestSpec::slots_per_epoch() * (F::Length::to_u64() - 1); - for i in 0..max { - assert!( - F::slot_needs_genesis_value(Slot::new(i), spec), - "slot {}", - i - ); - } - assert!(!F::slot_needs_genesis_value(Slot::new(max), spec)); - } - - #[test] - fn needs_genesis_value_randao() { - needs_genesis_value_test_randao(RandaoMixes); - } -} diff --git a/beacon_node/store/src/config.rs 
b/beacon_node/store/src/config.rs index c0f15f2417b..0aa00e659bc 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,12 +1,12 @@ use crate::hdiff::HierarchyConfig; -use crate::superstruct; use crate::{DBColumn, Error, StoreItem}; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use std::io::{Read, Write}; use std::num::NonZeroUsize; -use strum::{Display, EnumString, EnumVariantNames}; +use strum::{Display, EnumString, VariantNames}; +use superstruct::superstruct; use types::EthSpec; use types::non_zero_usize::new_non_zero_usize; use zstd::{Decoder, Encoder}; @@ -267,7 +267,7 @@ mod test { } #[derive( - Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Display, EnumString, EnumVariantNames, + Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Display, EnumString, VariantNames, )] #[strum(serialize_all = "lowercase")] pub enum DatabaseBackend { diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 8fdd5812eab..6b8c6156315 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -3,6 +3,7 @@ use crate::hot_cold_store::{BytesKey, HotColdDBError}; use crate::{ ColumnIter, ColumnKeyIter, DBColumn, Error, KeyValueStoreOp, get_key_for_col, metrics, }; +use fixed_bytes::FixedBytesExtended; use leveldb::{ compaction::Compaction, database::{ @@ -16,7 +17,7 @@ use leveldb::{ use std::collections::HashSet; use std::marker::PhantomData; use std::path::Path; -use types::{EthSpec, FixedBytesExtended, Hash256}; +use types::{EthSpec, Hash256}; use super::interface::WriteOptions; diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index f62647ae545..a07cc838863 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -1,4 +1,3 @@ -use crate::chunked_vector::ChunkError; use crate::config::StoreConfigError; use 
crate::hot_cold_store::{HotColdDBError, StateSummaryIteratorError}; use crate::{DBColumn, hdiff}; @@ -6,16 +5,14 @@ use crate::{DBColumn, hdiff}; use leveldb::error::Error as LevelDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot, milhouse}; +use types::{BeaconStateError, EpochCacheError, Hash256, InconsistentFork, Slot}; pub type Result = std::result::Result; #[derive(Debug)] pub enum Error { SszDecodeError(DecodeError), - VectorChunkError(ChunkError), BeaconStateError(BeaconStateError), - PartialBeaconStateError, HotColdDBError(HotColdDBError), DBError { message: String, @@ -126,12 +123,6 @@ impl From for Error { } } -impl From for Error { - fn from(e: ChunkError) -> Error { - Error::VectorChunkError(e) - } -} - impl From for Error { fn from(e: HotColdDBError) -> Error { Error::HotColdDBError(e) diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs index 3e20aab9bf0..323c87a9142 100644 --- a/beacon_node/store/src/hdiff.rs +++ b/beacon_node/store/src/hdiff.rs @@ -2,6 +2,7 @@ use crate::{DBColumn, StoreConfig, StoreItem, metrics}; use bls::PublicKeyBytes; use itertools::Itertools; +use milhouse::List; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -11,7 +12,7 @@ use std::str::FromStr; use std::sync::LazyLock; use superstruct::superstruct; use types::historical_summary::HistoricalSummary; -use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, List, Slot, Validator}; +use types::{BeaconState, ChainSpec, Epoch, EthSpec, Hash256, Slot, Validator}; static EMPTY_PUBKEY: LazyLock = LazyLock::new(PublicKeyBytes::empty); diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index e926caa9c77..c4137191744 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -18,6 +18,7 @@ use crate::{ metrics::{self, 
COLD_METRIC, HOT_METRIC}, parse_data_column_key, }; +use fixed_bytes::FixedBytesExtended; use itertools::{Itertools, process_results}; use lru::LruCache; use parking_lot::{Mutex, RwLock}; @@ -38,6 +39,7 @@ use std::path::Path; use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, instrument, warn}; +use typenum::Unsigned; use types::data_column_sidecar::{ColumnIndex, DataColumnSidecar, DataColumnSidecarList}; use types::*; use zstd::{Decoder, Encoder}; @@ -1314,8 +1316,13 @@ impl, Cold: ItemStore> HotColdDB state_root.as_slice().to_vec(), )); - if let Some(slot) = slot { - match self.hot_storage_strategy(slot)? { + // NOTE: `hot_storage_strategy` can error if there are states in the database + // prior to the `anchor_slot`. This can happen if checkpoint sync has been + // botched and left some states in the database prior to completing. + if let Some(slot) = slot + && let Ok(strategy) = self.hot_storage_strategy(slot) + { + match strategy { StorageStrategy::Snapshot => { // Full state stored in this position key_value_batch.push(KeyValueStoreOp::DeleteKey( diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 88d509731c8..e2b666e5973 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -2,9 +2,9 @@ use crate::errors::HandleUnavailable; use crate::{Error, HotColdDB, ItemStore}; use std::borrow::Cow; use std::marker::PhantomData; +use typenum::Unsigned; use types::{ BeaconState, BeaconStateError, BlindedPayload, EthSpec, Hash256, SignedBeaconBlock, Slot, - typenum::Unsigned, }; /// Implemented for types that have ancestors (e.g., blocks, states) that may be iterated over. 
@@ -387,8 +387,8 @@ mod test { use crate::{MemoryStore, StoreConfig as Config}; use beacon_chain::test_utils::BeaconChainHarness; use beacon_chain::types::MainnetEthSpec; + use fixed_bytes::FixedBytesExtended; use std::sync::Arc; - use types::FixedBytesExtended; fn get_state() -> BeaconState { let harness = BeaconChainHarness::builder(E::default()) diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index a3d4e4a8cea..ae5b2e1e571 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -8,8 +8,6 @@ //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. pub mod blob_sidecar_list_from_root; -pub mod chunked_iter; -pub mod chunked_vector; pub mod config; pub mod consensus_context; pub mod errors; @@ -21,7 +19,6 @@ mod impls; mod memory_store; pub mod metadata; pub mod metrics; -pub mod partial_beacon_state; pub mod reconstruct; pub mod state_cache; diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs deleted file mode 100644 index 13b0dfab9f7..00000000000 --- a/beacon_node/store/src/partial_beacon_state.rs +++ /dev/null @@ -1,487 +0,0 @@ -use crate::chunked_vector::{ - BlockRootsChunked, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRootsChunked, - load_variable_list_from_db, load_vector_from_db, -}; -use crate::{DBColumn, Error, KeyValueStore, KeyValueStoreOp}; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use types::historical_summary::HistoricalSummary; -use types::superstruct; -use types::*; - -/// DEPRECATED Lightweight variant of the `BeaconState` that is stored in the database. -/// -/// Utilises lazy-loading from separate storage for its vector fields. -/// -/// This can be deleted once schema versions prior to V22 are no longer supported. 
-#[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), - variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) -)] -#[derive(Debug, PartialEq, Clone, Encode)] -#[ssz(enum_behaviour = "transparent")] -pub struct PartialBeaconState -where - E: EthSpec, -{ - // Versioning - pub genesis_time: u64, - pub genesis_validators_root: Hash256, - #[superstruct(getter(copy))] - pub slot: Slot, - pub fork: Fork, - - // History - pub latest_block_header: BeaconBlockHeader, - - #[ssz(skip_serializing, skip_deserializing)] - pub block_roots: Option>, - #[ssz(skip_serializing, skip_deserializing)] - pub state_roots: Option>, - - #[ssz(skip_serializing, skip_deserializing)] - pub historical_roots: Option>, - - // Ethereum 1.0 chain data - pub eth1_data: Eth1Data, - pub eth1_data_votes: List, - pub eth1_deposit_index: u64, - - // Registry - pub validators: List, - pub balances: List, - - // Shuffling - /// Randao value from the current slot, for patching into the per-epoch randao vector. 
- pub latest_randao_value: Hash256, - #[ssz(skip_serializing, skip_deserializing)] - pub randao_mixes: Option>, - - // Slashings - slashings: Vector, - - // Attestations (genesis fork only) - #[superstruct(only(Base))] - pub previous_epoch_attestations: List, E::MaxPendingAttestations>, - #[superstruct(only(Base))] - pub current_epoch_attestations: List, E::MaxPendingAttestations>, - - // Participation (Altair and later) - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas))] - pub previous_epoch_participation: List, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas))] - pub current_epoch_participation: List, - - // Finality - pub justification_bits: BitVector, - pub previous_justified_checkpoint: Checkpoint, - pub current_justified_checkpoint: Checkpoint, - pub finalized_checkpoint: Checkpoint, - - // Inactivity - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas))] - pub inactivity_scores: List, - - // Light-client sync committees - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas))] - pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas))] - pub next_sync_committee: Arc>, - - // Execution - #[superstruct( - only(Bellatrix), - partial_getter(rename = "latest_execution_payload_header_bellatrix") - )] - pub latest_execution_payload_header: ExecutionPayloadHeaderBellatrix, - #[superstruct( - only(Capella), - partial_getter(rename = "latest_execution_payload_header_capella") - )] - pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, - #[superstruct( - only(Deneb), - partial_getter(rename = "latest_execution_payload_header_deneb") - )] - pub latest_execution_payload_header: ExecutionPayloadHeaderDeneb, - #[superstruct( - only(Electra), - partial_getter(rename = "latest_execution_payload_header_electra") - )] - pub latest_execution_payload_header: ExecutionPayloadHeaderElectra, - #[superstruct( - 
only(Fulu), - partial_getter(rename = "latest_execution_payload_header_fulu") - )] - pub latest_execution_payload_header: ExecutionPayloadHeaderFulu, - #[superstruct( - only(Gloas), - partial_getter(rename = "latest_execution_payload_header_gloas") - )] - pub latest_execution_payload_header: ExecutionPayloadHeaderGloas, - - // Capella - #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas))] - pub next_withdrawal_index: u64, - #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas))] - pub next_withdrawal_validator_index: u64, - - #[ssz(skip_serializing, skip_deserializing)] - #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas))] - pub historical_summaries: Option>, - - // Electra - #[superstruct(only(Electra, Fulu, Gloas))] - pub deposit_requests_start_index: u64, - #[superstruct(only(Electra, Fulu, Gloas))] - pub deposit_balance_to_consume: u64, - #[superstruct(only(Electra, Fulu, Gloas))] - pub exit_balance_to_consume: u64, - #[superstruct(only(Electra, Fulu, Gloas))] - pub earliest_exit_epoch: Epoch, - #[superstruct(only(Electra, Fulu, Gloas))] - pub consolidation_balance_to_consume: u64, - #[superstruct(only(Electra, Fulu, Gloas))] - pub earliest_consolidation_epoch: Epoch, - - #[superstruct(only(Electra, Fulu, Gloas))] - pub pending_deposits: List, - #[superstruct(only(Electra, Fulu, Gloas))] - pub pending_partial_withdrawals: - List, - #[superstruct(only(Electra, Fulu, Gloas))] - pub pending_consolidations: List, - #[superstruct(only(Fulu, Gloas))] - pub proposer_lookahead: Vector, -} - -impl PartialBeaconState { - /// SSZ decode. - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { - // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). 
- let slot_offset = ::ssz_fixed_len() + ::ssz_fixed_len(); - let slot_len = ::ssz_fixed_len(); - let slot_bytes = bytes.get(slot_offset..slot_offset + slot_len).ok_or( - DecodeError::InvalidByteLength { - len: bytes.len(), - expected: slot_offset + slot_len, - }, - )?; - - let slot = Slot::from_ssz_bytes(slot_bytes)?; - let fork_at_slot = spec.fork_name_at_slot::(slot); - - Ok(map_fork_name!( - fork_at_slot, - Self, - <_>::from_ssz_bytes(bytes)? - )) - } - - /// Prepare the partial state for storage in the KV database. - pub fn as_kv_store_op(&self, state_root: Hash256) -> KeyValueStoreOp { - KeyValueStoreOp::PutKeyValue( - DBColumn::BeaconState, - state_root.as_slice().to_vec(), - self.as_ssz_bytes(), - ) - } - - pub fn load_block_roots>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - if self.block_roots().is_none() { - *self.block_roots_mut() = Some(load_vector_from_db::( - store, - self.slot(), - spec, - )?); - } - Ok(()) - } - - pub fn load_state_roots>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - if self.state_roots().is_none() { - *self.state_roots_mut() = Some(load_vector_from_db::( - store, - self.slot(), - spec, - )?); - } - Ok(()) - } - - pub fn load_historical_roots>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - if self.historical_roots().is_none() { - *self.historical_roots_mut() = Some( - load_variable_list_from_db::(store, self.slot(), spec)?, - ); - } - Ok(()) - } - - pub fn load_historical_summaries>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - let slot = self.slot(); - if let Ok(historical_summaries) = self.historical_summaries_mut() - && historical_summaries.is_none() - { - *historical_summaries = Some(load_variable_list_from_db::( - store, slot, spec, - )?); - } - Ok(()) - } - - pub fn load_randao_mixes>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - if self.randao_mixes().is_none() { - // Load the 
per-epoch values from the database - let mut randao_mixes = - load_vector_from_db::(store, self.slot(), spec)?; - - // Patch the value for the current slot into the index for the current epoch - let current_epoch = self.slot().epoch(E::slots_per_epoch()); - let len = randao_mixes.len(); - *randao_mixes - .get_mut(current_epoch.as_usize() % len) - .ok_or(Error::RandaoMixOutOfBounds)? = *self.latest_randao_value(); - - *self.randao_mixes_mut() = Some(randao_mixes) - } - Ok(()) - } -} - -/// Implement the conversion from PartialBeaconState -> BeaconState. -macro_rules! impl_try_into_beacon_state { - ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_opt_fields:ident),*]) => { - BeaconState::$variant_name($struct_name { - // Versioning - genesis_time: $inner.genesis_time, - genesis_validators_root: $inner.genesis_validators_root, - slot: $inner.slot, - fork: $inner.fork, - - // History - latest_block_header: $inner.latest_block_header, - block_roots: unpack_field($inner.block_roots)?, - state_roots: unpack_field($inner.state_roots)?, - historical_roots: unpack_field($inner.historical_roots)?, - - // Eth1 - eth1_data: $inner.eth1_data, - eth1_data_votes: $inner.eth1_data_votes, - eth1_deposit_index: $inner.eth1_deposit_index, - - // Validator registry - validators: $inner.validators, - balances: $inner.balances, - - // Shuffling - randao_mixes: unpack_field($inner.randao_mixes)?, - - // Slashings - slashings: $inner.slashings, - - // Finality - justification_bits: $inner.justification_bits, - previous_justified_checkpoint: $inner.previous_justified_checkpoint, - current_justified_checkpoint: $inner.current_justified_checkpoint, - finalized_checkpoint: $inner.finalized_checkpoint, - - // Caching - total_active_balance: <_>::default(), - progressive_balances_cache: <_>::default(), - committee_caches: <_>::default(), - pubkey_cache: <_>::default(), - exit_cache: <_>::default(), - slashings_cache: <_>::default(), - epoch_cache: 
<_>::default(), - - // Variant-specific fields - $( - $extra_fields: $inner.$extra_fields - ),*, - - // Variant-specific optional fields - $( - $extra_opt_fields: unpack_field($inner.$extra_opt_fields)? - ),* - }) - } -} - -fn unpack_field(x: Option) -> Result { - x.ok_or(Error::PartialBeaconStateError) -} - -impl TryInto> for PartialBeaconState { - type Error = Error; - - fn try_into(self) -> Result, Error> { - let state = match self { - PartialBeaconState::Base(inner) => impl_try_into_beacon_state!( - inner, - Base, - BeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations], - [] - ), - PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!( - inner, - Altair, - BeaconStateAltair, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores - ], - [] - ), - PartialBeaconState::Bellatrix(inner) => impl_try_into_beacon_state!( - inner, - Bellatrix, - BeaconStateBellatrix, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ], - [] - ), - PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( - inner, - Capella, - BeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index - ], - [historical_summaries] - ), - PartialBeaconState::Deneb(inner) => impl_try_into_beacon_state!( - inner, - Deneb, - BeaconStateDeneb, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index - ], - [historical_summaries] - ), - PartialBeaconState::Electra(inner) => impl_try_into_beacon_state!( - 
inner, - Electra, - BeaconStateElectra, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index, - deposit_requests_start_index, - deposit_balance_to_consume, - exit_balance_to_consume, - earliest_exit_epoch, - consolidation_balance_to_consume, - earliest_consolidation_epoch, - pending_deposits, - pending_partial_withdrawals, - pending_consolidations - ], - [historical_summaries] - ), - PartialBeaconState::Fulu(inner) => impl_try_into_beacon_state!( - inner, - Fulu, - BeaconStateFulu, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index, - deposit_requests_start_index, - deposit_balance_to_consume, - exit_balance_to_consume, - earliest_exit_epoch, - consolidation_balance_to_consume, - earliest_consolidation_epoch, - pending_deposits, - pending_partial_withdrawals, - pending_consolidations, - proposer_lookahead - ], - [historical_summaries] - ), - PartialBeaconState::Gloas(inner) => impl_try_into_beacon_state!( - inner, - Gloas, - BeaconStateGloas, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index, - deposit_requests_start_index, - deposit_balance_to_consume, - exit_balance_to_consume, - earliest_exit_epoch, - consolidation_balance_to_consume, - earliest_consolidation_epoch, - pending_deposits, - pending_partial_withdrawals, - pending_consolidations, - proposer_lookahead - ], - [historical_summaries] - ), - }; - Ok(state) - } -} diff --git a/book/src/advanced_blobs.md b/book/src/advanced_blobs.md index 3c005e74312..e06bdb9fb9a 100644 --- 
a/book/src/advanced_blobs.md +++ b/book/src/advanced_blobs.md @@ -2,7 +2,7 @@ With the [Fusaka](https://ethereum.org/roadmap/fusaka) upgrade, the main feature [PeerDAS](https://ethereum.org/roadmap/fusaka#peerdas) allows storing only a portion of blob data, known as data columns, thus reducing the storage and bandwidth requirements of a full node. This however also means that a full node will not be able to serve blobs after Fusaka. To continue serving blobs, run the beacon node with `--semi-supernode` or `--supernode`. Note that this comes at a significant increase in storage and bandwidth requirements, see [this blog post about PeerDAS](https://blog.sigmaprime.io/peerdas-distributed-blob-building.html) and [Fusaka bandwidth estimation](https://ethpandaops.io/posts/fusaka-bandwidth-estimation/) for more details. -> Note: the above assumes that the beacon node has no attached validators. If the beacon node has attached validators, then it is required to custody (store) a certain number of data columns which increases with the number of staked ETH. For example, if the staked ETH is `$\geq$` 2048 ETH, then due to custody requirement, it will make the beacon node a semi-supernode ; if `$\geq$` 4096 ETH, the beacon node will be a supernode without needing the flag. +> Note: the above assumes that the beacon node has no attached validators. If the beacon node has attached validators, then it is required to custody (store) a certain number of data columns which increases with the number of staked ETH. For example, if the staked ETH is >= 2048 ETH, then due to custody requirement, it will make the beacon node a semi-supernode; if >= 4096 ETH, the beacon node will be a supernode without needing the flag. 
Table below summarizes the role of relevant flags in Lighthouse beacon node: @@ -17,7 +17,7 @@ While both `--supernode` and `--semi-supernode` can serve blobs, a supernode wil Combining `--prune-blobs false` and `--supernode` (or `--semi-supernode`) implies that no data columns will be pruned, and the node will be able to serve blobs since using the flag. -If you want historical blob data beyond the data availability period (18 days), you can backfill blobs or data columns with the experimental flag `--complete-blobs-backfill`. However, do note that this is an experimental feature and it may cause some issues, e.g., the node may block most of its peers. +If you want historical blob data beyond the data availability period (18 days), you can backfill blobs or data columns with the experimental flag `--complete-blob-backfill`. However, do note that this is an experimental feature and it only works when the flag is present during a fresh checkpoint sync when the database is initialised. The flag will not backfill blobs if the node is already running (with an existing database). During blob backfill, the feature may cause some issues, e.g., the node may block most of its peers. **⚠️ The following section on Blobs is archived and not maintained as blobs are stored in the form of data columns after the Fulu fork ⚠️** diff --git a/book/src/advanced_checkpoint_sync.md b/book/src/advanced_checkpoint_sync.md index 9cc18dda8c3..7c30598928b 100644 --- a/book/src/advanced_checkpoint_sync.md +++ b/book/src/advanced_checkpoint_sync.md @@ -160,7 +160,7 @@ curl -H "Accept: application/octet-stream" "http://localhost:5052/eth/v1/beacon/ where `$SLOT` is the slot number. A slot which is an epoch boundary slot (i.e., first slot of an epoch) should always be used for manual checkpoint sync. -If the block contains blobs, all state, block and blobs must be provided and must point to the same slot. 
The +If the block contains blobs, all state, block and blobs must be provided and must point to the same slot (only applies for slots before Fulu). The state may be from the same slot as the block (unadvanced), or advanced to an epoch boundary, in which case it will be assumed to be finalized at that epoch. diff --git a/book/src/advanced_database_migrations.md b/book/src/advanced_database_migrations.md index 3552a90b0e8..115a8858780 100644 --- a/book/src/advanced_database_migrations.md +++ b/book/src/advanced_database_migrations.md @@ -17,6 +17,7 @@ validator client or the slasher**. | Lighthouse version | Release date | Schema version | Downgrade available? | |--------------------|--------------|----------------|----------------------| +| v8.0.0 | Nov 2025 | v28 | yes before Fulu | | v8.0.0-rc.0 | Sep 2025 | v28 | yes before Fulu | | v7.1.0 | Jul 2025 | v26 | yes | | v7.0.0 | Apr 2025 | v22 | no | diff --git a/book/src/advanced_proposer_only.md b/book/src/advanced_proposer_only.md index f55e51606cf..1ef7a066559 100644 --- a/book/src/advanced_proposer_only.md +++ b/book/src/advanced_proposer_only.md @@ -23,9 +23,7 @@ normal activities such as performing attestations, but it will make the node harder to identify as a potential node to attack and will also consume less resources. -Specifically, this flag reduces the default peer count (to a safe minimal -number as maintaining peers on attestation subnets do not need to be considered), -prevents the node from subscribing to any attestation-subnets or +Specifically, this flag prevents the node from subscribing to any attestation-subnets or sync-committees which is a primary way for attackers to de-anonymize validators. 
diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index f804cb9df2e..0442bf4ec09 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -447,6 +447,27 @@ indicating that all states with slots `>= 0` are available, i.e., full state his on the specific meanings of these fields see the docs on [Checkpoint Sync](./advanced_checkpoint_sync.md#how-to-run-an-archived-node). +## `/lighthouse/custody/info` + +Information about data columns custody info. + +```bash +curl "http://localhost:5052/lighthouse/custody/info" | jq +``` + +```json +{ + "earliest_custodied_data_column_slot": "8823040", + "custody_group_count": "4", + "custody_columns": [ + "117", + "72", + "31", + "79" + ] +} +``` + ## `/lighthouse/custody/backfill` Starts a custody backfill sync from the next epoch with the node's latest custody requirements. The sync won't begin immediately, it waits until the next epoch is finalized before triggering. diff --git a/book/src/contributing_setup.md b/book/src/contributing_setup.md index b817faad879..958e8f71f6e 100644 --- a/book/src/contributing_setup.md +++ b/book/src/contributing_setup.md @@ -71,6 +71,47 @@ $ cargo nextest run -p safe_arith Summary [ 0.012s] 8 tests run: 8 passed, 0 skipped ``` +### Integration tests + +Due to the size and complexity of the test suite, Lighthouse uses a pattern that differs from how +[integration tests are usually defined](https://doc.rust-lang.org/rust-by-example/testing/integration_testing.html). +This pattern helps manage large test suites more effectively and ensures tests only run in release +mode to avoid stack overflow issues. 
+ +#### The "main pattern" + +For packages with integration tests that require more than one file, Lighthouse uses the following +structure: + +- A `main.rs` file is defined at `package/tests/main.rs` that declares other test files as modules +- In `package/Cargo.toml`, integration tests are explicitly configured: + + ```toml + [package] + autotests = false + + [[test]] + name = "package_tests" + path = "tests/main.rs" + ``` + +#### Rust Analyzer configuration + +This pattern, combined with `#![cfg(not(debug_assertions))]` directives in test files (which +prevent tests from running in debug mode), causes Rust Analyzer to not provide IDE services like +autocomplete and error checking in integration test files by default. + +To enable IDE support for these test files, configure Rust Analyzer to disable debug assertions. +For VSCode users, this is already configured in the repository's `.vscode/settings.json` file: + +```json +{ + "rust-analyzer.cargo.cfgs": [ + "!debug_assertions" + ] +} +``` + ### test_logger The test_logger, located in `/common/logging/` can be used to create a `Logger` that by diff --git a/book/src/help_vc.md b/book/src/help_vc.md index b19ff0ba388..2a9936d1d2f 100644 --- a/book/src/help_vc.md +++ b/book/src/help_vc.md @@ -221,6 +221,10 @@ Flags: automatically enabled for <= 64 validators. Enabling this metric for higher validator counts will lead to higher volume of prometheus metrics being collected. + --graffiti-append + When used, client version info will be prepended to user custom + graffiti, with a space in between. This should only be used with a + Lighthouse beacon node. -h, --help Prints help information --http diff --git a/book/src/ui_installation.md b/book/src/ui_installation.md index 5a785650049..82f5d755bcb 100644 --- a/book/src/ui_installation.md +++ b/book/src/ui_installation.md @@ -138,13 +138,13 @@ Navigate to the backend directory `cd backend`. 
Install all required Node packag After initializing the backend, return to the root directory. Install all frontend dependencies by executing `yarn`. Build the frontend using `yarn build`. Start the frontend production server with `yarn start`. -This will allow you to access siren at `http://localhost:3000` by default. +This will allow you to access siren at `http://localhost:3300` by default. ## Advanced configuration ### About self-signed SSL certificates -By default, internally, Siren is running on port 80 (plain, behind nginx), port 3000 (plain, direct) and port 443 (with SSL, behind nginx)). Siren will generate and use a self-signed certificate on startup. This will generate a security warning when you try to access the interface. We recommend to only disable SSL if you would access Siren over a local LAN or otherwise highly trusted or encrypted network (i.e. VPN). +By default, internally, Siren is running on port 80 (plain, behind nginx), port 3300 (plain, direct) and port 443 (with SSL, behind nginx)). Siren will generate and use a self-signed certificate on startup. This will generate a security warning when you try to access the interface. We recommend to only disable SSL if you would access Siren over a local LAN or otherwise highly trusted or encrypted network (i.e. VPN). 
#### Generating persistent SSL certificates and installing them to your system diff --git a/boot_node/Cargo.toml b/boot_node/Cargo.toml index 798869c2557..0df18017f0e 100644 --- a/boot_node/Cargo.toml +++ b/boot_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "boot_node" -version = "8.0.1" +version = { workspace = true } authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index 00c74a13038..d0a3e487c43 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -6,6 +6,7 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +bls = { workspace = true } eth2_keystore = { workspace = true } eth2_wallet = { workspace = true } filesystem = { workspace = true } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 596d50de420..bffdfcc38bd 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -4,6 +4,7 @@ //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. 
use crate::{default_keystore_password_path, read_password_string, write_file_via_temporary}; +use bls::PublicKey; use eth2_keystore::Keystore; use regex::Regex; use serde::{Deserialize, Serialize}; @@ -12,7 +13,7 @@ use std::fs::{self, File, create_dir_all}; use std::io; use std::path::{Path, PathBuf}; use tracing::error; -use types::{Address, PublicKey, graffiti::GraffitiString}; +use types::{Address, graffiti::GraffitiString}; use validator_dir::VOTING_KEYSTORE_FILE; use zeroize::Zeroizing; diff --git a/common/compare_fields/Cargo.toml b/common/compare_fields/Cargo.toml deleted file mode 100644 index 50e7e5f21d2..00000000000 --- a/common/compare_fields/Cargo.toml +++ /dev/null @@ -1,14 +0,0 @@ -[package] -name = "compare_fields" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -[package.metadata.cargo-udeps.ignore] -development = ["compare_fields_derive"] # used in doc-tests - -[dependencies] -itertools = { workspace = true } - -[dev-dependencies] -compare_fields_derive = { workspace = true } diff --git a/common/compare_fields/src/lib.rs b/common/compare_fields/src/lib.rs deleted file mode 100644 index 27baf148067..00000000000 --- a/common/compare_fields/src/lib.rs +++ /dev/null @@ -1,197 +0,0 @@ -//! Provides field-by-field comparisons for structs and vecs. -//! -//! Returns comparisons as data, without making assumptions about the desired equality (e.g., -//! does not `panic!` on inequality). -//! -//! Note: `compare_fields_derive` requires `PartialEq` and `Debug` implementations. -//! -//! ## Example -//! -//! ```rust -//! use compare_fields::{CompareFields, Comparison, FieldComparison}; -//! use compare_fields_derive::CompareFields; -//! -//! #[derive(PartialEq, Debug, CompareFields)] -//! pub struct Bar { -//! a: u64, -//! b: u16, -//! #[compare_fields(as_slice)] -//! c: Vec -//! } -//! -//! #[derive(Clone, PartialEq, Debug, CompareFields)] -//! pub struct Foo { -//! d: String -//! } -//! -//! 
let cat = Foo {d: "cat".to_string()}; -//! let dog = Foo {d: "dog".to_string()}; -//! let chicken = Foo {d: "chicken".to_string()}; -//! -//! let mut bar_a = Bar { -//! a: 42, -//! b: 12, -//! c: vec![ cat.clone(), dog.clone() ], -//! }; -//! -//! let mut bar_b = Bar { -//! a: 42, -//! b: 99, -//! c: vec![ chicken.clone(), dog.clone()] -//! }; -//! -//! let cat_dog = Comparison::Child(FieldComparison { -//! field_name: "d".to_string(), -//! equal: false, -//! a: "\"cat\"".to_string(), -//! b: "\"dog\"".to_string(), -//! }); -//! assert_eq!(cat.compare_fields(&dog), vec![cat_dog]); -//! -//! let bar_a_b = vec![ -//! Comparison::Child(FieldComparison { -//! field_name: "a".to_string(), -//! equal: true, -//! a: "42".to_string(), -//! b: "42".to_string(), -//! }), -//! Comparison::Child(FieldComparison { -//! field_name: "b".to_string(), -//! equal: false, -//! a: "12".to_string(), -//! b: "99".to_string(), -//! }), -//! Comparison::Parent{ -//! field_name: "c".to_string(), -//! equal: false, -//! children: vec![ -//! FieldComparison { -//! field_name: "0".to_string(), -//! equal: false, -//! a: "Some(Foo { d: \"cat\" })".to_string(), -//! b: "Some(Foo { d: \"chicken\" })".to_string(), -//! }, -//! FieldComparison { -//! field_name: "1".to_string(), -//! equal: true, -//! a: "Some(Foo { d: \"dog\" })".to_string(), -//! b: "Some(Foo { d: \"dog\" })".to_string(), -//! } -//! ] -//! } -//! ]; -//! assert_eq!(bar_a.compare_fields(&bar_b), bar_a_b); -//! 
``` -use itertools::{EitherOrBoth, Itertools}; -use std::fmt::Debug; - -#[derive(Debug, PartialEq, Clone)] -pub enum Comparison { - Child(FieldComparison), - Parent { - field_name: String, - equal: bool, - children: Vec, - }, -} - -impl Comparison { - pub fn child>(field_name: String, a: &T, b: &T) -> Self { - Comparison::Child(FieldComparison::new(field_name, a, b)) - } - - pub fn parent(field_name: String, equal: bool, children: Vec) -> Self { - Comparison::Parent { - field_name, - equal, - children, - } - } - - pub fn from_slice>(field_name: String, a: &[T], b: &[T]) -> Self { - Self::from_iter(field_name, a.iter(), b.iter()) - } - - pub fn from_into_iter<'a, T: Debug + PartialEq + 'a>( - field_name: String, - a: impl IntoIterator, - b: impl IntoIterator, - ) -> Self { - Self::from_iter(field_name, a.into_iter(), b.into_iter()) - } - - pub fn from_iter<'a, T: Debug + PartialEq + 'a>( - field_name: String, - a: impl Iterator, - b: impl Iterator, - ) -> Self { - let mut children = vec![]; - let mut all_equal = true; - - for (i, entry) in a.zip_longest(b).enumerate() { - let comparison = match entry { - EitherOrBoth::Both(x, y) => { - FieldComparison::new(format!("{i}"), &Some(x), &Some(y)) - } - EitherOrBoth::Left(x) => FieldComparison::new(format!("{i}"), &Some(x), &None), - EitherOrBoth::Right(y) => FieldComparison::new(format!("{i}"), &None, &Some(y)), - }; - all_equal = all_equal && comparison.equal(); - children.push(comparison); - } - - Self::parent(field_name, all_equal, children) - } - - pub fn retain_children(&mut self, f: F) - where - F: FnMut(&FieldComparison) -> bool, - { - match self { - Comparison::Child(_) => (), - Comparison::Parent { children, .. } => children.retain(f), - } - } - - pub fn equal(&self) -> bool { - match self { - Comparison::Child(fc) => fc.equal, - Comparison::Parent { equal, .. 
} => *equal, - } - } - - pub fn not_equal(&self) -> bool { - !self.equal() - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct FieldComparison { - pub field_name: String, - pub equal: bool, - pub a: String, - pub b: String, -} - -pub trait CompareFields { - fn compare_fields(&self, b: &Self) -> Vec; -} - -impl FieldComparison { - pub fn new>(field_name: String, a: &T, b: &T) -> Self { - Self { - field_name, - equal: a == b, - a: format!("{a:?}"), - b: format!("{b:?}"), - } - } - - pub fn equal(&self) -> bool { - self.equal - } - - pub fn not_equal(&self) -> bool { - !self.equal() - } -} diff --git a/common/compare_fields_derive/Cargo.toml b/common/compare_fields_derive/Cargo.toml deleted file mode 100644 index 19682bf3673..00000000000 --- a/common/compare_fields_derive/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "compare_fields_derive" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -[lib] -proc-macro = true - -[dependencies] -quote = { workspace = true } -syn = { workspace = true } diff --git a/common/compare_fields_derive/src/lib.rs b/common/compare_fields_derive/src/lib.rs deleted file mode 100644 index 35299707214..00000000000 --- a/common/compare_fields_derive/src/lib.rs +++ /dev/null @@ -1,70 +0,0 @@ -use proc_macro::TokenStream; -use quote::quote; -use syn::{DeriveInput, parse_macro_input}; - -fn is_iter(field: &syn::Field) -> bool { - field.attrs.iter().any(|attr| { - attr.path.is_ident("compare_fields") - && (attr.tokens.to_string().replace(' ', "") == "(as_slice)" - || attr.tokens.to_string().replace(' ', "") == "(as_iter)") - }) -} - -#[proc_macro_derive(CompareFields, attributes(compare_fields))] -pub fn compare_fields_derive(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as DeriveInput); - - let name = &item.ident; - let (impl_generics, ty_generics, where_clause) = &item.generics.split_for_impl(); - - let syn::Data::Struct(struct_data) = &item.data else { - 
panic!("compare_fields_derive only supports structs."); - }; - - let mut quotes = vec![]; - - for field in struct_data.fields.iter() { - let Some(ident_a) = &field.ident else { - panic!("compare_fields_derive only supports named struct fields."); - }; - let field_name = ident_a.to_string(); - let ident_b = ident_a.clone(); - - let quote = if is_iter(field) { - quote! { - comparisons.push(compare_fields::Comparison::from_into_iter( - #field_name.to_string(), - &self.#ident_a, - &b.#ident_b - )); - } - } else { - quote! { - comparisons.push( - compare_fields::Comparison::child( - #field_name.to_string(), - &self.#ident_a, - &b.#ident_b - ) - ); - } - }; - - quotes.push(quote); - } - - let output = quote! { - impl #impl_generics compare_fields::CompareFields for #name #ty_generics #where_clause { - fn compare_fields(&self, b: &Self) -> Vec { - let mut comparisons = vec![]; - - #( - #quotes - )* - - comparisons - } - } - }; - output.into() -} diff --git a/common/deposit_contract/Cargo.toml b/common/deposit_contract/Cargo.toml index 767f67b853f..76c18ef2429 100644 --- a/common/deposit_contract/Cargo.toml +++ b/common/deposit_contract/Cargo.toml @@ -7,8 +7,12 @@ edition = { workspace = true } build = "build.rs" [dependencies] -ethabi = "16.0.0" +alloy-dyn-abi = { workspace = true } +alloy-json-abi = { workspace = true } +alloy-primitives = { workspace = true } +bls = { workspace = true } ethereum_ssz = { workspace = true } +serde_json = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } diff --git a/common/deposit_contract/build.rs b/common/deposit_contract/build.rs index cae1d480c81..2061d13c243 100644 --- a/common/deposit_contract/build.rs +++ b/common/deposit_contract/build.rs @@ -153,14 +153,13 @@ fn verify_checksum(bytes: &[u8], expected_checksum: &str) { /// Returns the directory that will be used to store the deposit contract ABI. 
fn abi_dir() -> PathBuf { - let base = env::var("CARGO_MANIFEST_DIR") - .expect("should know manifest dir") + let base = env::var("OUT_DIR") + .expect("should know out dir") .parse::() - .expect("should parse manifest dir as path") - .join("contracts"); + .expect("should parse out dir as path"); std::fs::create_dir_all(base.clone()) - .expect("should be able to create abi directory in manifest"); + .expect("should be able to create abi directory in out dir"); base } diff --git a/common/deposit_contract/src/lib.rs b/common/deposit_contract/src/lib.rs index 7d58240f11b..6200a4ca158 100644 --- a/common/deposit_contract/src/lib.rs +++ b/common/deposit_contract/src/lib.rs @@ -1,82 +1,124 @@ -use ethabi::{Contract, Token}; +use alloy_dyn_abi::{DynSolValue, JsonAbiExt}; +use alloy_json_abi::JsonAbi; +use alloy_primitives::FixedBytes; +use bls::{PublicKeyBytes, SignatureBytes}; use ssz::{Decode, DecodeError as SszDecodeError, Encode}; use tree_hash::TreeHash; -use types::{DepositData, Hash256, PublicKeyBytes, SignatureBytes}; - -pub use ethabi::Error; +use types::{DepositData, Hash256}; #[derive(Debug)] -pub enum DecodeError { - EthabiError(ethabi::Error), +pub enum Error { + AlloyCoreError(alloy_json_abi::Error), + SerdeJsonError(serde_json::Error), + DynAbiError(alloy_dyn_abi::Error), SszDecodeError(SszDecodeError), + FunctionNotFound, MissingField, UnableToGetBytes, MissingToken, InadequateBytes, } -impl From for DecodeError { - fn from(e: ethabi::Error) -> DecodeError { - DecodeError::EthabiError(e) +impl From for Error { + fn from(e: alloy_json_abi::Error) -> Error { + Error::AlloyCoreError(e) + } +} + +impl From for Error { + fn from(e: serde_json::Error) -> Error { + Error::SerdeJsonError(e) + } +} + +impl From for Error { + fn from(e: alloy_dyn_abi::Error) -> Error { + Error::DynAbiError(e) + } +} + +impl From for Error { + fn from(e: SszDecodeError) -> Error { + Error::SszDecodeError(e) } } pub const CONTRACT_DEPLOY_GAS: usize = 4_000_000; pub const DEPOSIT_GAS: 
usize = 400_000; -pub const ABI: &[u8] = include_bytes!("../contracts/v0.12.1_validator_registration.json"); -pub const BYTECODE: &[u8] = include_bytes!("../contracts/v0.12.1_validator_registration.bytecode"); +pub const ABI: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_validator_registration.json" +)); +pub const BYTECODE: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_validator_registration.bytecode" +)); pub const DEPOSIT_DATA_LEN: usize = 420; // lol pub mod testnet { - pub const ABI: &[u8] = - include_bytes!("../contracts/v0.12.1_testnet_validator_registration.json"); - pub const BYTECODE: &[u8] = - include_bytes!("../contracts/v0.12.1_testnet_validator_registration.bytecode"); + pub const ABI: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_testnet_validator_registration.json" + )); + pub const BYTECODE: &[u8] = include_bytes!(concat!( + env!("OUT_DIR"), + "/v0.12.1_testnet_validator_registration.bytecode" + )); } pub fn encode_eth1_tx_data(deposit_data: &DepositData) -> Result, Error> { let params = vec![ - Token::Bytes(deposit_data.pubkey.as_ssz_bytes()), - Token::Bytes(deposit_data.withdrawal_credentials.as_ssz_bytes()), - Token::Bytes(deposit_data.signature.as_ssz_bytes()), - Token::FixedBytes(deposit_data.tree_hash_root().as_ssz_bytes()), + DynSolValue::Bytes(deposit_data.pubkey.as_ssz_bytes()), + DynSolValue::Bytes(deposit_data.withdrawal_credentials.as_ssz_bytes()), + DynSolValue::Bytes(deposit_data.signature.as_ssz_bytes()), + DynSolValue::FixedBytes( + FixedBytes::<32>::from_slice(&deposit_data.tree_hash_root().as_ssz_bytes()), + 32, + ), ]; // Here we make an assumption that the `crate::testnet::ABI` has a superset of the features of // the crate::ABI`. 
- let abi = Contract::load(ABI)?; - let function = abi.function("deposit")?; - function.encode_input(¶ms) + let abi: JsonAbi = serde_json::from_slice(ABI)?; + let function = abi + .function("deposit") + .and_then(|functions| functions.first()) + .ok_or(Error::FunctionNotFound)?; + + function + .abi_encode_input(¶ms) + .map_err(Error::DynAbiError) } -pub fn decode_eth1_tx_data( - bytes: &[u8], - amount: u64, -) -> Result<(DepositData, Hash256), DecodeError> { - let abi = Contract::load(ABI)?; - let function = abi.function("deposit")?; - let mut tokens = function.decode_input(bytes.get(4..).ok_or(DecodeError::InadequateBytes)?)?; +pub fn decode_eth1_tx_data(bytes: &[u8], amount: u64) -> Result<(DepositData, Hash256), Error> { + let abi: JsonAbi = serde_json::from_slice(ABI)?; + let function = abi + .function("deposit") + .and_then(|functions| functions.first()) + .ok_or(Error::FunctionNotFound)?; + + let input_data = bytes.get(4..).ok_or(Error::InadequateBytes)?; + let mut tokens = function.abi_decode_input(input_data)?; macro_rules! decode_token { - ($type: ty, $to_fn: ident) => { - <$type>::from_ssz_bytes( - &tokens - .pop() - .ok_or_else(|| DecodeError::MissingToken)? - .$to_fn() - .ok_or_else(|| DecodeError::UnableToGetBytes)?, - ) - .map_err(DecodeError::SszDecodeError)? - }; + ($type: ty) => {{ + let token = tokens.pop().ok_or(Error::MissingToken)?; + let bytes_data = match token { + DynSolValue::Bytes(b) => b, + DynSolValue::FixedBytes(b, _) => b.to_vec(), + _ => return Err(Error::UnableToGetBytes), + }; + <$type>::from_ssz_bytes(&bytes_data)? 
+ }}; } - let root = decode_token!(Hash256, into_fixed_bytes); + let root = decode_token!(Hash256); let deposit_data = DepositData { amount, - signature: decode_token!(SignatureBytes, into_bytes), - withdrawal_credentials: decode_token!(Hash256, into_bytes), - pubkey: decode_token!(PublicKeyBytes, into_bytes), + signature: decode_token!(SignatureBytes), + withdrawal_credentials: decode_token!(Hash256), + pubkey: decode_token!(PublicKeyBytes), }; Ok((deposit_data, root)) @@ -85,10 +127,8 @@ pub fn decode_eth1_tx_data( #[cfg(test)] mod tests { use super::*; - use types::{ - ChainSpec, EthSpec, Keypair, MinimalEthSpec, Signature, - test_utils::generate_deterministic_keypair, - }; + use bls::{Keypair, Signature}; + use types::{ChainSpec, EthSpec, MinimalEthSpec, test_utils::generate_deterministic_keypair}; type E = MinimalEthSpec; diff --git a/common/eip_3076/Cargo.toml b/common/eip_3076/Cargo.toml index 851ef26238a..058e1fd1a0a 100644 --- a/common/eip_3076/Cargo.toml +++ b/common/eip_3076/Cargo.toml @@ -11,7 +11,9 @@ json = ["dep:serde_json"] [dependencies] arbitrary = { workspace = true, features = ["derive"], optional = true } +bls = { workspace = true } ethereum_serde_utils = { workspace = true } +fixed_bytes = { workspace = true } serde = { workspace = true } serde_json = { workspace = true, optional = true } types = { workspace = true } diff --git a/common/eip_3076/src/lib.rs b/common/eip_3076/src/lib.rs index 2d47a77de40..cdd05d7b1ed 100644 --- a/common/eip_3076/src/lib.rs +++ b/common/eip_3076/src/lib.rs @@ -1,9 +1,10 @@ +use bls::PublicKeyBytes; use serde::{Deserialize, Serialize}; use std::cmp::max; use std::collections::{HashMap, HashSet}; #[cfg(feature = "json")] use std::io; -use types::{Epoch, Hash256, PublicKeyBytes, Slot}; +use types::{Epoch, Hash256, Slot}; #[derive(Debug)] pub enum Error { @@ -170,9 +171,9 @@ impl Interchange { #[cfg(test)] mod tests { use super::*; + use fixed_bytes::FixedBytesExtended; use std::fs::File; use tempfile::tempdir; - use 
types::FixedBytesExtended; fn get_interchange() -> Interchange { Interchange { diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 46066a559f8..da8aba5ded9 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -5,35 +5,35 @@ authors = ["Paul Hauner "] edition = { workspace = true } [features] -default = ["lighthouse"] -lighthouse = [] +default = [] +lighthouse = ["proto_array", "eth2_keystore", "eip_3076", "zeroize"] +events = ["reqwest-eventsource", "futures", "futures-util"] [dependencies] -derivative = { workspace = true } -eip_3076 = { workspace = true } -either = { workspace = true } -enr = { version = "0.13.0", features = ["ed25519"] } -eth2_keystore = { workspace = true } +bls = { workspace = true } +context_deserialize = { workspace = true } +educe = { workspace = true } +eip_3076 = { workspace = true, optional = true } +eth2_keystore = { workspace = true, optional = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } -futures = { workspace = true } -futures-util = "0.3.8" -libp2p-identity = { version = "0.2", features = ["peerid"] } +futures = { workspace = true, optional = true } +futures-util = { version = "0.3.8", optional = true } mediatype = "0.19.13" -multiaddr = "0.18.2" pretty_reqwest_error = { workspace = true } -proto_array = { workspace = true } -rand = { workspace = true } +proto_array = { workspace = true, optional = true } reqwest = { workspace = true } -reqwest-eventsource = "0.5.0" +reqwest-eventsource = { version = "0.6.0", optional = true } sensitive_url = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } ssz_types = { workspace = true } -test_random_derive = { path = "../../common/test_random_derive" } +superstruct = { workspace = true } types = { workspace = true } -zeroize = { workspace = true } +zeroize = { workspace = true, optional = true } [dev-dependencies] +rand = { workspace = true } 
+test_random_derive = { path = "../../common/test_random_derive" } tokio = { workspace = true } diff --git a/consensus/types/src/beacon_response.rs b/common/eth2/src/beacon_response.rs similarity index 97% rename from consensus/types/src/beacon_response.rs rename to common/eth2/src/beacon_response.rs index fc59fc94329..d58734997ce 100644 --- a/consensus/types/src/beacon_response.rs +++ b/common/eth2/src/beacon_response.rs @@ -1,12 +1,8 @@ -use crate::{ContextDeserialize, ForkName}; +use context_deserialize::ContextDeserialize; use serde::de::DeserializeOwned; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::value::Value; - -pub trait ForkVersionDecode: Sized { - /// SSZ decode with explicit fork variant. - fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; -} +use types::ForkName; /// The metadata of type M should be set to `EmptyMetadata` if you don't care about adding fields other than /// version. If you *do* care about adding other fields you can mix in any type that implements diff --git a/common/eth2/src/error.rs b/common/eth2/src/error.rs new file mode 100644 index 00000000000..1f21220b798 --- /dev/null +++ b/common/eth2/src/error.rs @@ -0,0 +1,167 @@ +//! Centralized error handling for eth2 API clients +//! +//! This module consolidates all error types, response processing, +//! and recovery logic for both beacon node and validator client APIs. + +use pretty_reqwest_error::PrettyReqwestError; +use reqwest::{Response, StatusCode}; +use sensitive_url::SensitiveUrl; +use serde::{Deserialize, Serialize}; +use std::{fmt, path::PathBuf}; + +/// Main error type for eth2 API clients +#[derive(Debug)] +pub enum Error { + /// The `reqwest` client raised an error. + HttpClient(PrettyReqwestError), + #[cfg(feature = "events")] + /// The `reqwest_eventsource` client raised an error. + SseClient(Box), + /// The server returned an error message where the body was able to be parsed. 
+ ServerMessage(ErrorMessage), + /// The server returned an error message with an array of errors. + ServerIndexedMessage(IndexedErrorMessage), + /// The server returned an error message where the body was unable to be parsed. + StatusCode(StatusCode), + /// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`. + InvalidUrl(SensitiveUrl), + /// The supplied validator client secret is invalid. + InvalidSecret(String), + /// The server returned a response with an invalid signature. It may be an impostor. + InvalidSignatureHeader, + /// The server returned a response without a signature header. It may be an impostor. + MissingSignatureHeader, + /// The server returned an invalid JSON response. + InvalidJson(serde_json::Error), + /// The server returned an invalid server-sent event. + InvalidServerSentEvent(String), + /// The server sent invalid response headers. + InvalidHeaders(String), + /// The server returned an invalid SSZ response. + InvalidSsz(ssz::DecodeError), + /// An I/O error occurred while loading an API token from disk. + TokenReadError(PathBuf, std::io::Error), + /// The client has been configured without a server pubkey, but requires one for this request. + NoServerPubkey, + /// The client has been configured without an API token, but requires one for this request. + NoToken, +} + +/// An API error serializable to JSON. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct ErrorMessage { + pub code: u16, + pub message: String, + #[serde(default)] + pub stacktraces: Vec, +} + +/// An indexed API error serializable to JSON. 
+#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct IndexedErrorMessage { + pub code: u16, + pub message: String, + pub failures: Vec, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct Failure { + pub index: u64, + pub message: String, +} + +impl Failure { + pub fn new(index: usize, message: String) -> Self { + Self { + index: index as u64, + message, + } + } +} + +/// Server error response variants +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ResponseError { + Indexed(IndexedErrorMessage), + Message(ErrorMessage), +} + +impl Error { + /// If the error has a HTTP status code, return it. + pub fn status(&self) -> Option { + match self { + Error::HttpClient(error) => error.inner().status(), + #[cfg(feature = "events")] + Error::SseClient(error) => { + if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error.as_ref() { + Some(*status) + } else { + None + } + } + Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), + Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), + Error::StatusCode(status) => Some(*status), + Error::InvalidUrl(_) => None, + Error::InvalidSecret(_) => None, + Error::InvalidSignatureHeader => None, + Error::MissingSignatureHeader => None, + Error::InvalidJson(_) => None, + Error::InvalidSsz(_) => None, + Error::InvalidServerSentEvent(_) => None, + Error::InvalidHeaders(_) => None, + Error::TokenReadError(..) => None, + Error::NoServerPubkey | Error::NoToken => None, + } + } +} + +impl From for Error { + fn from(error: reqwest::Error) -> Self { + Error::HttpClient(error.into()) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + +/// Returns `Ok(response)` if the response is a `200 OK`, `202 ACCEPTED`, or `204 NO_CONTENT` +/// Otherwise, creates an appropriate error message. 
+pub async fn ok_or_error(response: Response) -> Result { + let status = response.status(); + + if status == StatusCode::OK + || status == StatusCode::ACCEPTED + || status == StatusCode::NO_CONTENT + { + Ok(response) + } else if let Ok(message) = response.json::().await { + match message { + ResponseError::Message(message) => Err(Error::ServerMessage(message)), + ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)), + } + } else { + Err(Error::StatusCode(status)) + } +} + +/// Returns `Ok(response)` if the response is a success (2xx) response. Otherwise, creates an +/// appropriate error message. +pub async fn success_or_error(response: Response) -> Result { + let status = response.status(); + + if status.is_success() { + Ok(response) + } else if let Ok(message) = response.json().await { + match message { + ResponseError::Message(message) => Err(Error::ServerMessage(message)), + ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)), + } + } else { + Err(Error::StatusCode(status)) + } +} diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 995e6966eae..8746e3c063c 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -7,6 +7,8 @@ //! Eventually it would be ideal to publish this crate on crates.io, however we have some local //! dependencies preventing this presently. 
+pub mod beacon_response; +pub mod error; #[cfg(feature = "lighthouse")] pub mod lighthouse; #[cfg(feature = "lighthouse")] @@ -14,27 +16,35 @@ pub mod lighthouse_vc; pub mod mixin; pub mod types; +pub use beacon_response::{ + BeaconResponse, EmptyMetadata, ExecutionOptimisticFinalizedBeaconResponse, + ExecutionOptimisticFinalizedMetadata, ForkVersionedResponse, UnversionedResponse, +}; + +pub use self::error::{Error, ok_or_error, success_or_error}; +pub use reqwest; +pub use reqwest::{StatusCode, Url}; +pub use sensitive_url::SensitiveUrl; + use self::mixin::{RequestAccept, ResponseOptional}; -use self::types::{Error as ResponseError, *}; -use ::types::beacon_response::ExecutionOptimisticFinalizedBeaconResponse; -use derivative::Derivative; +use self::types::*; +use bls::SignatureBytes; +use context_deserialize::ContextDeserialize; +use educe::Educe; +#[cfg(feature = "events")] use futures::Stream; +#[cfg(feature = "events")] use futures_util::StreamExt; -use libp2p_identity::PeerId; -use pretty_reqwest_error::PrettyReqwestError; -pub use reqwest; use reqwest::{ Body, IntoUrl, RequestBuilder, Response, header::{HeaderMap, HeaderValue}, }; -pub use reqwest::{StatusCode, Url}; +#[cfg(feature = "events")] use reqwest_eventsource::{Event, EventSource}; -pub use sensitive_url::{SensitiveError, SensitiveUrl}; use serde::{Serialize, de::DeserializeOwned}; use ssz::Encode; use std::fmt; use std::future::Future; -use std::path::PathBuf; use std::time::Duration; pub const V1: EndpointVersion = EndpointVersion(1); @@ -68,83 +78,6 @@ const HTTP_GET_DEPOSIT_SNAPSHOT_QUOTIENT: u32 = 4; const HTTP_GET_VALIDATOR_BLOCK_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_DEFAULT_TIMEOUT_QUOTIENT: u32 = 4; -#[derive(Debug)] -pub enum Error { - /// The `reqwest` client raised an error. - HttpClient(PrettyReqwestError), - /// The `reqwest_eventsource` client raised an error. - SseClient(Box), - /// The server returned an error message where the body was able to be parsed. 
- ServerMessage(ErrorMessage), - /// The server returned an error message with an array of errors. - ServerIndexedMessage(IndexedErrorMessage), - /// The server returned an error message where the body was unable to be parsed. - StatusCode(StatusCode), - /// The supplied URL is badly formatted. It should look something like `http://127.0.0.1:5052`. - InvalidUrl(SensitiveUrl), - /// The supplied validator client secret is invalid. - InvalidSecret(String), - /// The server returned a response with an invalid signature. It may be an impostor. - InvalidSignatureHeader, - /// The server returned a response without a signature header. It may be an impostor. - MissingSignatureHeader, - /// The server returned an invalid JSON response. - InvalidJson(serde_json::Error), - /// The server returned an invalid server-sent event. - InvalidServerSentEvent(String), - /// The server sent invalid response headers. - InvalidHeaders(String), - /// The server returned an invalid SSZ response. - InvalidSsz(ssz::DecodeError), - /// An I/O error occurred while loading an API token from disk. - TokenReadError(PathBuf, std::io::Error), - /// The client has been configured without a server pubkey, but requires one for this request. - NoServerPubkey, - /// The client has been configured without an API token, but requires one for this request. - NoToken, -} - -impl From for Error { - fn from(error: reqwest::Error) -> Self { - Error::HttpClient(error.into()) - } -} - -impl Error { - /// If the error has a HTTP status code, return it. 
- pub fn status(&self) -> Option { - match self { - Error::HttpClient(error) => error.inner().status(), - Error::SseClient(error) => { - if let reqwest_eventsource::Error::InvalidStatusCode(status, _) = error.as_ref() { - Some(*status) - } else { - None - } - } - Error::ServerMessage(msg) => StatusCode::try_from(msg.code).ok(), - Error::ServerIndexedMessage(msg) => StatusCode::try_from(msg.code).ok(), - Error::StatusCode(status) => Some(*status), - Error::InvalidUrl(_) => None, - Error::InvalidSecret(_) => None, - Error::InvalidSignatureHeader => None, - Error::MissingSignatureHeader => None, - Error::InvalidJson(_) => None, - Error::InvalidSsz(_) => None, - Error::InvalidServerSentEvent(_) => None, - Error::InvalidHeaders(_) => None, - Error::TokenReadError(..) => None, - Error::NoServerPubkey | Error::NoToken => None, - } - } -} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - /// A struct to define a variety of different timeouts for different validator tasks to ensure /// proper fallback behaviour. #[derive(Clone, Debug, PartialEq, Eq)] @@ -212,10 +145,10 @@ impl Timeouts { /// A wrapper around `reqwest::Client` which provides convenience methods for interfacing with a /// Lighthouse Beacon Node HTTP server (`http_api`). 
-#[derive(Clone, Debug, Derivative)] -#[derivative(PartialEq)] +#[derive(Clone, Debug, Educe)] +#[educe(PartialEq)] pub struct BeaconNodeHttpClient { - #[derivative(PartialEq = "ignore")] + #[educe(PartialEq(ignore))] client: reqwest::Client, server: SensitiveUrl, timeouts: Timeouts, @@ -229,12 +162,6 @@ impl fmt::Display for BeaconNodeHttpClient { } } -impl AsRef for BeaconNodeHttpClient { - fn as_ref(&self) -> &str { - self.server.as_ref() - } -} - impl BeaconNodeHttpClient { pub fn new(server: SensitiveUrl, timeouts: Timeouts) -> Self { Self { @@ -255,10 +182,14 @@ impl BeaconNodeHttpClient { timeouts, } } + // Returns a reference to the `SensitiveUrl` of the server. + pub fn server(&self) -> &SensitiveUrl { + &self.server + } /// Return the path with the standard `/eth/vX` prefix applied. fn eth_path(&self, version: EndpointVersion) -> Result { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -904,7 +835,8 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_pending_deposits( &self, state_id: StateId, - ) -> Result>>, Error> { + ) -> Result>>, Error> + { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -914,7 +846,9 @@ impl BeaconNodeHttpClient { .push(&state_id.to_string()) .push("pending_deposits"); - self.get_opt(path).await + self.get_fork_contextual(path, |fork| fork) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET beacon/states/{state_id}/pending_partial_withdrawals` @@ -923,8 +857,10 @@ impl BeaconNodeHttpClient { pub async fn get_beacon_states_pending_partial_withdrawals( &self, state_id: StateId, - ) -> Result>>, Error> - { + ) -> Result< + Option>>, + Error, + > { let mut path = self.eth_path(V1)?; path.path_segments_mut() @@ -934,7 +870,9 @@ impl BeaconNodeHttpClient { .push(&state_id.to_string()) .push("pending_partial_withdrawals"); - self.get_opt(path).await + self.get_fork_contextual(path, |fork| fork) + .await + .map(|opt| opt.map(BeaconResponse::ForkVersioned)) } /// `GET beacon/states/{state_id}/pending_consolidations` @@ -2045,7 +1983,7 @@ impl BeaconNodeHttpClient { /// `GET node/peers/{peer_id}` pub async fn get_node_peers_by_id( &self, - peer_id: PeerId, + peer_id: &str, ) -> Result, Error> { let mut path = self.eth_path(V1)?; @@ -2053,7 +1991,7 @@ impl BeaconNodeHttpClient { .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
.push("node") .push("peers") - .push(&peer_id.to_string()); + .push(peer_id); self.get(path).await } @@ -2269,6 +2207,7 @@ impl BeaconNodeHttpClient { graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, builder_booster_factor: Option, + graffiti_policy: Option, ) -> Result { let mut path = self.eth_path(V3)?; @@ -2296,6 +2235,14 @@ impl BeaconNodeHttpClient { .append_pair("builder_boost_factor", &builder_booster_factor.to_string()); } + // Only append the HTTP URL request if the graffiti_policy is to AppendClientVersions + // If PreserveUserGraffiti (default), then the HTTP URL request does not contain graffiti_policy + // so that the default case is compliant to the spec + if let Some(GraffitiPolicy::AppendClientVersions) = graffiti_policy { + path.query_pairs_mut() + .append_pair("graffiti_policy", "AppendClientVersions"); + } + Ok(path) } @@ -2306,6 +2253,7 @@ impl BeaconNodeHttpClient { randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, builder_booster_factor: Option, + graffiti_policy: Option, ) -> Result<(JsonProduceBlockV3Response, ProduceBlockV3Metadata), Error> { self.get_validator_blocks_v3_modular( slot, @@ -2313,6 +2261,7 @@ impl BeaconNodeHttpClient { graffiti, SkipRandaoVerification::No, builder_booster_factor, + graffiti_policy, ) .await } @@ -2325,6 +2274,7 @@ impl BeaconNodeHttpClient { graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, builder_booster_factor: Option, + graffiti_policy: Option, ) -> Result<(JsonProduceBlockV3Response, ProduceBlockV3Metadata), Error> { let path = self .get_validator_blocks_v3_path( @@ -2333,6 +2283,7 @@ impl BeaconNodeHttpClient { graffiti, skip_randao_verification, builder_booster_factor, + graffiti_policy, ) .await?; @@ -2375,6 +2326,7 @@ impl BeaconNodeHttpClient { randao_reveal: &SignatureBytes, graffiti: Option<&Graffiti>, builder_booster_factor: Option, + graffiti_policy: Option, ) -> Result<(ProduceBlockV3Response, ProduceBlockV3Metadata), 
Error> { self.get_validator_blocks_v3_modular_ssz::( slot, @@ -2382,6 +2334,7 @@ impl BeaconNodeHttpClient { graffiti, SkipRandaoVerification::No, builder_booster_factor, + graffiti_policy, ) .await } @@ -2394,6 +2347,7 @@ impl BeaconNodeHttpClient { graffiti: Option<&Graffiti>, skip_randao_verification: SkipRandaoVerification, builder_booster_factor: Option, + graffiti_policy: Option, ) -> Result<(ProduceBlockV3Response, ProduceBlockV3Metadata), Error> { let path = self .get_validator_blocks_v3_path( @@ -2402,6 +2356,7 @@ impl BeaconNodeHttpClient { graffiti, skip_randao_verification, builder_booster_factor, + graffiti_policy, ) .await?; @@ -2683,7 +2638,7 @@ impl BeaconNodeHttpClient { ids: &[u64], epoch: Epoch, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -2828,6 +2783,7 @@ impl BeaconNodeHttpClient { } /// `GET events?topics` + #[cfg(feature = "events")] pub async fn get_events( &self, topic: &[EventTopic], @@ -2928,37 +2884,3 @@ impl BeaconNodeHttpClient { .await } } - -/// Returns `Ok(response)` if the response is a `200 OK` response. Otherwise, creates an -/// appropriate error message. -pub async fn ok_or_error(response: Response) -> Result { - let status = response.status(); - - if status == StatusCode::OK { - Ok(response) - } else if let Ok(message) = response.json().await { - match message { - ResponseError::Message(message) => Err(Error::ServerMessage(message)), - ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)), - } - } else { - Err(Error::StatusCode(status)) - } -} - -/// Returns `Ok(response)` if the response is a success (2xx) response. Otherwise, creates an -/// appropriate error message. 
-pub async fn success_or_error(response: Response) -> Result { - let status = response.status(); - - if status.is_success() { - Ok(response) - } else if let Ok(message) = response.json().await { - match message { - ResponseError::Message(message) => Err(Error::ServerMessage(message)), - ResponseError::Indexed(indexed) => Err(Error::ServerIndexedMessage(indexed)), - } - } else { - Err(Error::StatusCode(status)) - } -} diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 4f9a049e44e..993c263cbfb 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -173,7 +173,7 @@ pub struct DepositLog { impl BeaconNodeHttpClient { /// `GET lighthouse/health` pub async fn get_lighthouse_health(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -185,7 +185,7 @@ impl BeaconNodeHttpClient { /// `GET lighthouse/syncing` pub async fn get_lighthouse_syncing(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -197,7 +197,7 @@ impl BeaconNodeHttpClient { /// `GET lighthouse/custody/info` pub async fn get_lighthouse_custody_info(&self) -> Result { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -210,7 +210,7 @@ impl BeaconNodeHttpClient { /// `POST lighthouse/custody/backfill` pub async fn post_lighthouse_custody_backfill(&self) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -231,7 +231,7 @@ impl BeaconNodeHttpClient { /// `GET lighthouse/proto_array` pub async fn get_lighthouse_proto_array(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -246,7 +246,7 @@ impl BeaconNodeHttpClient { &self, epoch: Epoch, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -264,7 +264,7 @@ impl BeaconNodeHttpClient { epoch: Epoch, validator_id: ValidatorId, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -278,7 +278,7 @@ impl BeaconNodeHttpClient { /// `POST lighthouse/database/reconstruct` pub async fn post_lighthouse_database_reconstruct(&self) -> Result { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -291,7 +291,7 @@ impl BeaconNodeHttpClient { /// `POST lighthouse/add_peer` pub async fn post_lighthouse_add_peer(&self, req: AdminPeer) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -303,7 +303,7 @@ impl BeaconNodeHttpClient { /// `POST lighthouse/remove_peer` pub async fn post_lighthouse_remove_peer(&self, req: AdminPeer) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -323,7 +323,7 @@ impl BeaconNodeHttpClient { start_slot: Slot, end_slot: Slot, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -344,7 +344,7 @@ impl BeaconNodeHttpClient { start_epoch: Epoch, end_epoch: Epoch, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -366,7 +366,7 @@ impl BeaconNodeHttpClient { end_epoch: Epoch, target: String, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? diff --git a/common/eth2/src/lighthouse_vc/http_client.rs b/common/eth2/src/lighthouse_vc/http_client.rs index 60289605531..3c850fcb052 100644 --- a/common/eth2/src/lighthouse_vc/http_client.rs +++ b/common/eth2/src/lighthouse_vc/http_client.rs @@ -1,5 +1,6 @@ use super::types::*; -use crate::Error; +use crate::{Error, success_or_error}; +use bls::PublicKeyBytes; use reqwest::{ IntoUrl, header::{HeaderMap, HeaderValue}, @@ -145,7 +146,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await + success_or_error(response).await } /// Perform a HTTP DELETE request, returning the `Response` for further processing. 
@@ -157,7 +158,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await + success_or_error(response).await } async fn get(&self, url: U) -> Result { @@ -218,7 +219,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await + success_or_error(response).await } async fn post( @@ -250,7 +251,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await?; + success_or_error(response).await?; Ok(()) } @@ -268,7 +269,7 @@ impl ValidatorClientHttpClient { .send() .await .map_err(Error::from)?; - ok_or_error(response).await + success_or_error(response).await } /// Perform a HTTP DELETE request. @@ -283,7 +284,7 @@ impl ValidatorClientHttpClient { /// `GET lighthouse/version` pub async fn get_lighthouse_version(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -295,7 +296,7 @@ impl ValidatorClientHttpClient { /// `GET lighthouse/health` pub async fn get_lighthouse_health(&self) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -309,7 +310,7 @@ impl ValidatorClientHttpClient { pub async fn get_lighthouse_spec( &self, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -323,7 +324,7 @@ impl ValidatorClientHttpClient { pub async fn get_lighthouse_validators( &self, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -338,7 +339,7 @@ impl ValidatorClientHttpClient { &self, validator_pubkey: &PublicKeyBytes, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -354,7 +355,7 @@ impl ValidatorClientHttpClient { &self, validators: Vec, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -369,7 +370,7 @@ impl ValidatorClientHttpClient { &self, request: &CreateValidatorsMnemonicRequest, ) -> Result>, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -385,7 +386,7 @@ impl ValidatorClientHttpClient { &self, request: &KeystoreValidatorsPostRequest, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -401,7 +402,7 @@ impl ValidatorClientHttpClient { &self, request: &[Web3SignerValidatorRequest], ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -424,7 +425,7 @@ impl ValidatorClientHttpClient { prefer_builder_proposals: Option, graffiti: Option, ) -> Result<(), Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
@@ -451,7 +452,7 @@ impl ValidatorClientHttpClient { &self, req: &DeleteKeystoresRequest, ) -> Result { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -462,7 +463,7 @@ impl ValidatorClientHttpClient { } fn make_keystores_url(&self) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("eth") @@ -472,7 +473,7 @@ impl ValidatorClientHttpClient { } fn make_remotekeys_url(&self) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("eth") @@ -482,7 +483,7 @@ impl ValidatorClientHttpClient { } fn make_fee_recipient_url(&self, pubkey: &PublicKeyBytes) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("eth") @@ -494,7 +495,7 @@ impl ValidatorClientHttpClient { } fn make_graffiti_url(&self, pubkey: &PublicKeyBytes) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("eth") @@ -506,7 +507,7 @@ impl ValidatorClientHttpClient { } fn make_gas_limit_url(&self, pubkey: &PublicKeyBytes) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
.push("eth") @@ -519,7 +520,7 @@ impl ValidatorClientHttpClient { /// `GET lighthouse/auth` pub async fn get_auth(&self) -> Result { - let mut url = self.server.full.clone(); + let mut url = self.server.expose_full().clone(); url.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? .push("lighthouse") @@ -635,7 +636,7 @@ impl ValidatorClientHttpClient { pubkey: &PublicKeyBytes, epoch: Option, ) -> Result, Error> { - let mut path = self.server.full.clone(); + let mut path = self.server.expose_full().clone(); path.path_segments_mut() .map_err(|()| Error::InvalidUrl(self.server.clone()))? @@ -681,20 +682,3 @@ impl ValidatorClientHttpClient { self.delete(url).await } } - -/// Returns `Ok(response)` if the response is a `200 OK` response or a -/// `202 Accepted` response. Otherwise, creates an appropriate error message. -async fn ok_or_error(response: Response) -> Result { - let status = response.status(); - - if status == StatusCode::OK - || status == StatusCode::ACCEPTED - || status == StatusCode::NO_CONTENT - { - Ok(response) - } else if let Ok(message) = response.json().await { - Err(Error::ServerMessage(message)) - } else { - Err(Error::StatusCode(status)) - } -} diff --git a/common/eth2/src/lighthouse_vc/std_types.rs b/common/eth2/src/lighthouse_vc/std_types.rs index 0290bdd0b79..c54252b9e33 100644 --- a/common/eth2/src/lighthouse_vc/std_types.rs +++ b/common/eth2/src/lighthouse_vc/std_types.rs @@ -1,6 +1,7 @@ +use bls::PublicKeyBytes; use eth2_keystore::Keystore; use serde::{Deserialize, Serialize}; -use types::{Address, Graffiti, PublicKeyBytes}; +use types::{Address, Graffiti}; use zeroize::Zeroizing; pub use eip_3076::Interchange; diff --git a/common/eth2/src/lighthouse_vc/types.rs b/common/eth2/src/lighthouse_vc/types.rs index 4407e30e436..07f8421dc5c 100644 --- a/common/eth2/src/lighthouse_vc/types.rs +++ b/common/eth2/src/lighthouse_vc/types.rs @@ -1,8 +1,8 @@ pub use crate::lighthouse::Health; pub use 
crate::lighthouse_vc::std_types::*; pub use crate::types::{GenericResponse, VersionData}; +use bls::{PublicKey, PublicKeyBytes}; use eth2_keystore::Keystore; -use graffiti::GraffitiString; use serde::{Deserialize, Serialize}; use std::path::PathBuf; pub use types::*; diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index f35518ee6bc..b1a61ce00cc 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1,13 +1,15 @@ //! This module exposes a superset of the `types` crate. It adds additional types that are only //! required for the HTTP API. +pub use types::*; + use crate::{ CONSENSUS_BLOCK_VALUE_HEADER, CONSENSUS_VERSION_HEADER, EXECUTION_PAYLOAD_BLINDED_HEADER, EXECUTION_PAYLOAD_VALUE_HEADER, Error as ServerError, }; -use enr::{CombinedKey, Enr}; +use bls::{PublicKeyBytes, SecretKey, Signature, SignatureBytes}; +use context_deserialize::ContextDeserialize; use mediatype::{MediaType, MediaTypeList, names}; -use multiaddr::Multiaddr; use reqwest::header::HeaderMap; use serde::{Deserialize, Deserializer, Serialize}; use serde_utils::quoted_u64::Quoted; @@ -18,54 +20,24 @@ use std::fmt::{self, Display}; use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use superstruct::superstruct; + +#[cfg(test)] use test_random_derive::TestRandom; -use types::beacon_block_body::KzgCommitments; +#[cfg(test)] use types::test_utils::TestRandom; -pub use types::*; - -#[cfg(feature = "lighthouse")] -use crate::lighthouse::BlockReward; - -/// An API error serializable to JSON. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -#[serde(untagged)] -pub enum Error { - Indexed(IndexedErrorMessage), - Message(ErrorMessage), -} - -/// An API error serializable to JSON. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct ErrorMessage { - pub code: u16, - pub message: String, - #[serde(default)] - pub stacktraces: Vec, -} -/// An indexed API error serializable to JSON. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct IndexedErrorMessage { - pub code: u16, - pub message: String, - pub failures: Vec, +// TODO(mac): Temporary module and re-export hack to expose old `consensus/types` via `eth2/types`. +pub use crate::beacon_response::*; +pub mod beacon_response { + pub use crate::beacon_response::*; } -/// A single failure in an index of API errors, serializable to JSON. -#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct Failure { - pub index: u64, - pub message: String, -} +#[cfg(feature = "lighthouse")] +use crate::lighthouse::BlockReward; -impl Failure { - pub fn new(index: usize, message: String) -> Self { - Self { - index: index as u64, - message, - } - } -} +// Re-export error types from the unified error module +pub use crate::error::{ErrorMessage, Failure, IndexedErrorMessage, ResponseError as Error}; /// The version of a single API endpoint, e.g. the `v1` in `/eth/v1/beacon/blocks`. #[derive(Debug, Clone, Copy, PartialEq)] @@ -590,9 +562,9 @@ pub struct ChainHeadData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct IdentityData { pub peer_id: String, - pub enr: Enr, - pub p2p_addresses: Vec, - pub discovery_addresses: Vec, + pub enr: String, + pub p2p_addresses: Vec, + pub discovery_addresses: Vec, pub metadata: MetaData, } @@ -780,12 +752,20 @@ pub struct ProposerData { pub slot: Slot, } +#[derive(Clone, Copy, Serialize, Deserialize, Default, Debug)] +pub enum GraffitiPolicy { + #[default] + PreserveUserGraffiti, + AppendClientVersions, +} + #[derive(Clone, Deserialize)] pub struct ValidatorBlocksQuery { pub randao_reveal: SignatureBytes, pub graffiti: Option, pub skip_randao_verification: SkipRandaoVerification, pub builder_boost_factor: Option, + pub graffiti_policy: Option, } #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Deserialize)] @@ -1558,6 +1538,21 @@ pub struct ForkChoiceNode { pub weight: u64, pub validity: Option, pub execution_block_hash: 
Option, + pub extra_data: ForkChoiceExtraData, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)] +pub struct ForkChoiceExtraData { + pub target_root: Hash256, + pub justified_root: Hash256, + pub finalized_root: Hash256, + pub unrealized_justified_root: Option, + pub unrealized_finalized_root: Option, + pub unrealized_justified_epoch: Option, + pub unrealized_finalized_epoch: Option, + pub execution_status: String, + pub best_child: Option, + pub best_descendant: Option, } #[derive(Copy, Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq)] @@ -1685,8 +1680,8 @@ mod tests { BeaconBlock::::Deneb(BeaconBlockDeneb::empty(&spec)), Signature::empty(), ); - let blobs = BlobsList::::from(vec![Blob::::default()]); - let kzg_proofs = KzgProofs::::from(vec![KzgProof::empty()]); + let blobs = BlobsList::::try_from(vec![Blob::::default()]).unwrap(); + let kzg_proofs = KzgProofs::::try_from(vec![KzgProof::empty()]).unwrap(); let signed_block_contents = PublishBlockRequest::new(Arc::new(block), Some((kzg_proofs, blobs))); @@ -2226,7 +2221,8 @@ pub enum ContentType { Ssz, } -#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom)] +#[cfg_attr(test, derive(TestRandom))] +#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, Encode, Decode)] #[serde(bound = "E: EthSpec")] pub struct BlobsBundle { pub commitments: KzgCommitments, diff --git a/common/eth2_interop_keypairs/Cargo.toml b/common/eth2_interop_keypairs/Cargo.toml index c19b32014e1..309ff233e62 100644 --- a/common/eth2_interop_keypairs/Cargo.toml +++ b/common/eth2_interop_keypairs/Cargo.toml @@ -3,6 +3,7 @@ name = "eth2_interop_keypairs" version = "0.2.0" authors = ["Paul Hauner "] edition = { workspace = true } +autotests = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -15,3 +16,7 @@ serde_yaml = { workspace = true } [dev-dependencies] base64 = "0.13.0" + +[[test]] +name = 
"eth2_interop_keypairs_tests" +path = "tests/main.rs" diff --git a/common/eth2_interop_keypairs/tests/main.rs b/common/eth2_interop_keypairs/tests/main.rs new file mode 100644 index 00000000000..4ee50127f29 --- /dev/null +++ b/common/eth2_interop_keypairs/tests/main.rs @@ -0,0 +1,2 @@ +mod from_file; +mod generation; diff --git a/common/eth2_network_config/Cargo.toml b/common/eth2_network_config/Cargo.toml index ec5b0cc1d71..416ffb1975a 100644 --- a/common/eth2_network_config/Cargo.toml +++ b/common/eth2_network_config/Cargo.toml @@ -10,6 +10,7 @@ build = "build.rs" bytes = { workspace = true } discv5 = { workspace = true } eth2_config = { workspace = true } +fixed_bytes = { workspace = true } kzg = { workspace = true } pretty_reqwest_error = { workspace = true } reqwest = { workspace = true } diff --git a/common/eth2_network_config/src/lib.rs b/common/eth2_network_config/src/lib.rs index 12de21239a0..16ee45e524e 100644 --- a/common/eth2_network_config/src/lib.rs +++ b/common/eth2_network_config/src/lib.rs @@ -464,9 +464,10 @@ fn parse_state_download_url(url: &str) -> Result { #[cfg(test)] mod tests { use super::*; + use fixed_bytes::FixedBytesExtended; use ssz::Encode; use tempfile::Builder as TempBuilder; - use types::{Eth1Data, FixedBytesExtended, GnosisEthSpec, MainnetEthSpec}; + use types::{Eth1Data, GnosisEthSpec, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/common/health_metrics/Cargo.toml b/common/health_metrics/Cargo.toml index 20a8c6e4e48..816d4ec68cc 100644 --- a/common/health_metrics/Cargo.toml +++ b/common/health_metrics/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = { workspace = true } [dependencies] -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } metrics = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] diff --git a/common/lighthouse_version/Cargo.toml b/common/lighthouse_version/Cargo.toml index b7e669ed940..ab9509cb1e8 100644 --- a/common/lighthouse_version/Cargo.toml +++ 
b/common/lighthouse_version/Cargo.toml @@ -1,12 +1,8 @@ [package] name = "lighthouse_version" -version = "0.1.0" +version = { workspace = true } authors = ["Sigma Prime "] edition = { workspace = true } -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -git-version = "0.3.4" [dev-dependencies] regex = { workspace = true } diff --git a/common/lighthouse_version/build.rs b/common/lighthouse_version/build.rs new file mode 100644 index 00000000000..1af99996df5 --- /dev/null +++ b/common/lighthouse_version/build.rs @@ -0,0 +1,81 @@ +use std::env; +use std::fs; +use std::path::Path; +use std::process::Command; + +const CLIENT_NAME: &str = "Lighthouse"; + +fn main() { + println!("cargo:rerun-if-changed=build.rs"); + + let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); + let manifest_path = Path::new(&manifest_dir); + + // The crate version is inherited from the workspace. + let semantic_version = env::var("CARGO_PKG_VERSION").unwrap(); + + // Hardcode the .git/ path. + // This assumes the `lighthouse_version` crate will never move. + let git_dir = manifest_path.join("../../.git"); + + if git_dir.exists() { + // HEAD either contains a commit hash directly (detached HEAD), or a reference to a branch. + let head_path = git_dir.join("HEAD"); + if head_path.exists() { + println!("cargo:rerun-if-changed={}", head_path.display()); + + if let Ok(head_content) = fs::read_to_string(&head_path) { + let head_content = head_content.trim(); + + // If HEAD is a reference, also check that file. + if let Some(ref_path) = head_content.strip_prefix("ref: ") { + let full_ref_path = git_dir.join(ref_path); + if full_ref_path.exists() { + println!("cargo:rerun-if-changed={}", full_ref_path.display()); + } + } + } + } + } + + // Construct Lighthouse version string without commit hash. 
+ let base_version = format!("{}/v{}", CLIENT_NAME, semantic_version); + + let commit_hash = get_git_hash(7); + let commit_prefix = get_git_hash(8); + + // If commit hash is valid, construct the full version string. + let version = if !commit_hash.is_empty() && commit_hash.len() >= 7 { + format!("{}-{}", base_version, commit_hash) + } else { + base_version + }; + + println!("cargo:rustc-env=GIT_VERSION={}", version); + println!("cargo:rustc-env=GIT_COMMIT_PREFIX={}", commit_prefix); + println!("cargo:rustc-env=CLIENT_NAME={}", CLIENT_NAME); + println!("cargo:rustc-env=SEMANTIC_VERSION={}", semantic_version); +} + +fn get_git_hash(len: usize) -> String { + Command::new("git") + .args(["rev-parse", &format!("--short={}", len), "HEAD"]) + .output() + .ok() + .and_then(|output| { + if output.status.success() { + String::from_utf8(output.stdout).ok() + } else { + None + } + }) + .map(|s| s.trim().to_string()) + .unwrap_or_else(|| { + // Fallback commit prefix for execution engine reporting. + if len == 8 { + "00000000".to_string() + } else { + String::new() + } + }) +} diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index c865e5ba697..1466487520b 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -1,49 +1,25 @@ -use git_version::git_version; use std::env::consts; /// Returns the current version of this build of Lighthouse. /// -/// A plus-sign (`+`) is appended to the git commit if the tree is dirty. /// Commit hash is omitted if the sources don't include git information. 
/// /// ## Example /// -/// `Lighthouse/v1.5.1-67da032+` -pub const VERSION: &str = git_version!( - args = [ - "--always", - "--dirty=+", - "--abbrev=7", - // NOTE: using --match instead of --exclude for compatibility with old Git - "--match=thiswillnevermatchlol" - ], - prefix = "Lighthouse/v8.0.1-", - fallback = "Lighthouse/v8.0.1" -); +/// `Lighthouse/v8.0.0-67da032` +pub const VERSION: &str = env!("GIT_VERSION"); /// Returns the first eight characters of the latest commit hash for this build. /// /// No indication is given if the tree is dirty. This is part of the standard /// for reporting the client version to the execution engine. -pub const COMMIT_PREFIX: &str = git_version!( - args = [ - "--always", - "--abbrev=8", - // NOTE: using --match instead of --exclude for compatibility with old Git - "--match=thiswillnevermatchlol" - ], - prefix = "", - suffix = "", - cargo_prefix = "", - cargo_suffix = "", - fallback = "00000000" -); +pub const COMMIT_PREFIX: &str = env!("GIT_COMMIT_PREFIX"); /// Returns `VERSION`, but with platform information appended to the end. /// /// ## Example /// -/// `Lighthouse/v1.5.1-67da032+/x86_64-linux` +/// `Lighthouse/v8.0.0-67da032/x86_64-linux` pub fn version_with_platform() -> String { format!("{}/{}-{}", VERSION, consts::ARCH, consts::OS) } @@ -52,16 +28,16 @@ pub fn version_with_platform() -> String { /// /// ## Example /// -/// `1.5.1` +/// `8.0.0` pub fn version() -> &'static str { - "8.0.1" + env!("SEMANTIC_VERSION") } /// Returns the name of the current client running. 
/// /// This will usually be "Lighthouse" pub fn client_name() -> &'static str { - "Lighthouse" + env!("CLIENT_NAME") } #[cfg(test)] @@ -72,7 +48,7 @@ mod test { #[test] fn version_formatting() { let re = Regex::new( - r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-(rc|beta).[0-9])?(-[[:xdigit:]]{7})?\+?$", + r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-(rc|beta)\.[0-9])?(-[[:xdigit:]]{7})?$", ) .unwrap(); assert!( @@ -91,4 +67,14 @@ mod test { version() ); } + + #[test] + fn client_name_is_lighthouse() { + assert_eq!(client_name(), "Lighthouse"); + } + + #[test] + fn version_contains_semantic_version() { + assert!(VERSION.contains(version())); + } } diff --git a/common/malloc_utils/Cargo.toml b/common/malloc_utils/Cargo.toml index 39c7137d4cb..1052128852a 100644 --- a/common/malloc_utils/Cargo.toml +++ b/common/malloc_utils/Cargo.toml @@ -21,6 +21,8 @@ jemalloc-profiling = ["tikv-jemallocator/profiling"] # Force the use of system malloc (or glibc) rather than jemalloc. # This is a no-op on Windows where jemalloc is always disabled. 
sysmalloc = [] +# Enable jemalloc with unprefixed malloc (recommended for reproducible builds) +jemalloc-unprefixed = ["jemalloc", "tikv-jemallocator/unprefixed_malloc_on_supported_platforms"] [dependencies] libc = "0.2.79" diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 9e2c36e2c76..e00b1f027b6 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -6,7 +6,7 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } health_metrics = { workspace = true } lighthouse_version = { workspace = true } metrics = { workspace = true } diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 465618c9a82..03b93f2faae 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -195,7 +195,7 @@ impl MonitoringHttpClient { endpoint = %self.monitoring_endpoint, "Sending metrics to remote endpoint" ); - self.post(self.monitoring_endpoint.full.clone(), &metrics) + self.post(self.monitoring_endpoint.expose_full().clone(), &metrics) .await } } diff --git a/common/sensitive_url/Cargo.toml b/common/sensitive_url/Cargo.toml deleted file mode 100644 index ff562097225..00000000000 --- a/common/sensitive_url/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "sensitive_url" -version = "0.1.0" -authors = ["Mac L "] -edition = { workspace = true } -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -serde = { workspace = true } -url = { workspace = true } diff --git a/common/sensitive_url/src/lib.rs b/common/sensitive_url/src/lib.rs deleted file mode 100644 index 64ad070a1fd..00000000000 --- a/common/sensitive_url/src/lib.rs +++ /dev/null @@ -1,120 +0,0 @@ -use serde::{Deserialize, Deserializer, Serialize, Serializer, de}; 
-use std::fmt; -use std::str::FromStr; -use url::Url; - -#[derive(Debug)] -pub enum SensitiveError { - InvalidUrl(String), - ParseError(url::ParseError), - RedactError(String), -} - -impl fmt::Display for SensitiveError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -// Wrapper around Url which provides a custom `Display` implementation to protect user secrets. -#[derive(Clone, PartialEq)] -pub struct SensitiveUrl { - pub full: Url, - pub redacted: String, -} - -impl fmt::Display for SensitiveUrl { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.redacted.fmt(f) - } -} - -impl fmt::Debug for SensitiveUrl { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.redacted.fmt(f) - } -} - -impl AsRef for SensitiveUrl { - fn as_ref(&self) -> &str { - self.redacted.as_str() - } -} - -impl Serialize for SensitiveUrl { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(self.full.as_ref()) - } -} - -impl<'de> Deserialize<'de> for SensitiveUrl { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s: String = Deserialize::deserialize(deserializer)?; - SensitiveUrl::parse(&s) - .map_err(|e| de::Error::custom(format!("Failed to deserialize sensitive URL {:?}", e))) - } -} - -impl FromStr for SensitiveUrl { - type Err = SensitiveError; - - fn from_str(s: &str) -> Result { - Self::parse(s) - } -} - -impl SensitiveUrl { - pub fn parse(url: &str) -> Result { - let surl = Url::parse(url).map_err(SensitiveError::ParseError)?; - SensitiveUrl::new(surl) - } - - pub fn new(full: Url) -> Result { - let mut redacted = full.clone(); - redacted - .path_segments_mut() - .map_err(|_| SensitiveError::InvalidUrl("URL cannot be a base.".to_string()))? 
- .clear(); - redacted.set_query(None); - - if redacted.has_authority() { - redacted.set_username("").map_err(|_| { - SensitiveError::RedactError("Unable to redact username.".to_string()) - })?; - redacted.set_password(None).map_err(|_| { - SensitiveError::RedactError("Unable to redact password.".to_string()) - })?; - } - - Ok(Self { - full, - redacted: redacted.to_string(), - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn redact_remote_url() { - let full = "https://project:secret@example.com/example?somequery"; - let surl = SensitiveUrl::parse(full).unwrap(); - assert_eq!(surl.to_string(), "https://example.com/"); - assert_eq!(surl.full.to_string(), full); - } - #[test] - fn redact_localhost_url() { - let full = "http://localhost:5052/"; - let surl = SensitiveUrl::parse(full).unwrap(); - assert_eq!(surl.to_string(), "http://localhost:5052/"); - assert_eq!(surl.full.to_string(), full); - } -} diff --git a/common/test_random_derive/src/lib.rs b/common/test_random_derive/src/lib.rs index 3017936f1a1..bf57d79aaa8 100644 --- a/common/test_random_derive/src/lib.rs +++ b/common/test_random_derive/src/lib.rs @@ -8,7 +8,8 @@ use syn::{DeriveInput, parse_macro_input}; /// The field attribute is: `#[test_random(default)]` fn should_use_default(field: &syn::Field) -> bool { field.attrs.iter().any(|attr| { - attr.path.is_ident("test_random") && attr.tokens.to_string().replace(' ', "") == "(default)" + attr.path().is_ident("test_random") + && matches!(&attr.meta, syn::Meta::List(list) if list.tokens.to_string().replace(' ', "") == "default") }) } diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml index 4c03b7662ed..a5b373fcae9 100644 --- a/common/validator_dir/Cargo.toml +++ b/common/validator_dir/Cargo.toml @@ -11,7 +11,7 @@ insecure_keys = [] [dependencies] bls = { workspace = true } deposit_contract = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } eth2_keystore = { workspace = true } 
filesystem = { workspace = true } hex = { workspace = true } diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs index bae36789bb5..ab495242e49 100644 --- a/common/validator_dir/src/builder.rs +++ b/common/validator_dir/src/builder.rs @@ -1,5 +1,5 @@ use crate::{Error as DirError, ValidatorDir}; -use bls::get_withdrawal_credentials; +use bls::{Keypair, Signature, get_withdrawal_credentials}; use deposit_contract::{Error as DepositError, encode_eth1_tx_data}; use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText}; use filesystem::create_with_600_perms; @@ -7,7 +7,7 @@ use rand::{Rng, distr::Alphanumeric}; use std::fs::{File, create_dir_all}; use std::io::{self, Write}; use std::path::{Path, PathBuf}; -use types::{ChainSpec, DepositData, Hash256, Keypair, Signature}; +use types::{ChainSpec, DepositData, Hash256}; /// The `Alphanumeric` crate only generates a-z, A-Z, 0-9, therefore it has a range of 62 /// characters. diff --git a/common/validator_dir/src/validator_dir.rs b/common/validator_dir/src/validator_dir.rs index 0ed28c4ddc2..0799897a70f 100644 --- a/common/validator_dir/src/validator_dir.rs +++ b/common/validator_dir/src/validator_dir.rs @@ -2,15 +2,16 @@ use crate::builder::{ ETH1_DEPOSIT_AMOUNT_FILE, ETH1_DEPOSIT_DATA_FILE, VOTING_KEYSTORE_FILE, WITHDRAWAL_KEYSTORE_FILE, keystore_password_path, }; +use bls::Keypair; use deposit_contract::decode_eth1_tx_data; -use derivative::Derivative; +use educe::Educe; use eth2_keystore::{Error as KeystoreError, Keystore, PlainText}; use lockfile::{Lockfile, LockfileError}; use std::fs::{File, read, write}; use std::io; use std::path::{Path, PathBuf}; use tree_hash::TreeHash; -use types::{DepositData, Hash256, Keypair}; +use types::{DepositData, Hash256}; /// The file used to save the Eth1 transaction hash from a deposit. 
pub const ETH1_DEPOSIT_TX_HASH_FILE: &str = "eth1-deposit-tx-hash.txt"; @@ -32,7 +33,7 @@ pub enum Error { UnableToReadDepositAmount(io::Error), UnableToParseDepositAmount(std::num::ParseIntError), DepositAmountIsNotUtf8(std::string::FromUtf8Error), - UnableToParseDepositData(deposit_contract::DecodeError), + UnableToParseDepositData(deposit_contract::Error), Eth1TxHashExists(PathBuf), UnableToWriteEth1TxHash(io::Error), /// The deposit root in the deposit data file does not match the one generated locally. This is @@ -56,11 +57,11 @@ pub struct Eth1DepositData { /// /// Holds a lockfile in `self.dir` to attempt to prevent concurrent access from multiple /// processes. -#[derive(Debug, Derivative)] -#[derivative(PartialEq)] +#[derive(Debug, Educe)] +#[educe(PartialEq)] pub struct ValidatorDir { dir: PathBuf, - #[derivative(PartialEq = "ignore")] + #[educe(PartialEq(ignore))] _lockfile: Lockfile, } diff --git a/common/validator_dir/tests/tests.rs b/common/validator_dir/tests/tests.rs index 7d9730ebd37..ede80c244ee 100644 --- a/common/validator_dir/tests/tests.rs +++ b/common/validator_dir/tests/tests.rs @@ -1,10 +1,11 @@ #![cfg(not(debug_assertions))] +use bls::Keypair; use eth2_keystore::{Keystore, KeystoreBuilder, PlainText}; use std::fs::{self, File}; use std::path::Path; use tempfile::{TempDir, tempdir}; -use types::{EthSpec, Keypair, MainnetEthSpec, test_utils::generate_deterministic_keypair}; +use types::{EthSpec, MainnetEthSpec, test_utils::generate_deterministic_keypair}; use validator_dir::{ Builder, BuilderError, ETH1_DEPOSIT_DATA_FILE, ETH1_DEPOSIT_TX_HASH_FILE, VOTING_KEYSTORE_FILE, ValidatorDir, WITHDRAWAL_KEYSTORE_FILE, diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs index c10adbac0df..1c77d4d84b8 100644 --- a/common/warp_utils/src/lib.rs +++ b/common/warp_utils/src/lib.rs @@ -5,5 +5,6 @@ pub mod cors; pub mod json; pub mod query; pub mod reject; +pub mod status_code; pub mod task; pub mod uor; diff --git 
a/common/warp_utils/src/status_code.rs b/common/warp_utils/src/status_code.rs new file mode 100644 index 00000000000..1b052973599 --- /dev/null +++ b/common/warp_utils/src/status_code.rs @@ -0,0 +1,9 @@ +use eth2::StatusCode; +use warp::Rejection; + +/// Convert from a "new" `http::StatusCode` to a `warp` compatible one. +pub fn convert(code: StatusCode) -> Result { + code.as_u16().try_into().map_err(|e| { + crate::reject::custom_server_error(format!("bad status code {code:?} - {e:?}")) + }) +} diff --git a/consensus/context_deserialize/context_deserialize/Cargo.toml b/consensus/context_deserialize/context_deserialize/Cargo.toml deleted file mode 100644 index 0e4a97b9ae3..00000000000 --- a/consensus/context_deserialize/context_deserialize/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "context_deserialize" -version = "0.1.0" -edition = "2021" - -[features] -default = ["derive"] -derive = ["dep:context_deserialize_derive"] -milhouse = ["dep:milhouse"] -ssz = ["dep:ssz_types"] -all = ["derive", "milhouse", "ssz"] - -[dependencies] -context_deserialize_derive = { version = "0.1.0", path = "../context_deserialize_derive", optional = true } -milhouse = { workspace = true, optional = true } -serde = { workspace = true } -ssz_types = { workspace = true, optional = true } diff --git a/consensus/context_deserialize/context_deserialize/src/impls/core.rs b/consensus/context_deserialize/context_deserialize/src/impls/core.rs deleted file mode 100644 index 803619365f1..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/impls/core.rs +++ /dev/null @@ -1,103 +0,0 @@ -use crate::ContextDeserialize; -use serde::de::{Deserialize, DeserializeSeed, Deserializer, SeqAccess, Visitor}; -use std::marker::PhantomData; -use std::sync::Arc; - -impl<'de, C, T> ContextDeserialize<'de, T> for Arc -where - C: ContextDeserialize<'de, T>, -{ - fn context_deserialize(deserializer: D, context: T) -> Result - where - D: Deserializer<'de>, - { - 
Ok(Arc::new(C::context_deserialize(deserializer, context)?)) - } -} - -impl<'de, T, C> ContextDeserialize<'de, C> for Vec -where - T: ContextDeserialize<'de, C>, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>, - { - // Our Visitor, which owns one copy of the context T - struct ContextVisitor { - context: T, - _marker: PhantomData, - } - - impl<'de, C, T> Visitor<'de> for ContextVisitor - where - C: ContextDeserialize<'de, T>, - T: Clone, - { - type Value = Vec; - - fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.write_str("a sequence of context‐deserialized elements") - } - - fn visit_seq(self, mut seq: A) -> Result, A::Error> - where - A: SeqAccess<'de>, - { - let mut out = Vec::with_capacity(seq.size_hint().unwrap_or(0)); - // for each element, we clone the context and hand it to the seed - while let Some(elem) = seq.next_element_seed(ContextSeed { - context: self.context.clone(), - _marker: PhantomData, - })? { - out.push(elem); - } - Ok(out) - } - } - - // A little seed that hands the deserializer + context into C::context_deserialize - struct ContextSeed { - context: C, - _marker: PhantomData, - } - - impl<'de, T, C> DeserializeSeed<'de> for ContextSeed - where - T: ContextDeserialize<'de, C>, - C: Clone, - { - type Value = T; - - fn deserialize(self, deserializer: D) -> Result - where - D: Deserializer<'de>, - { - T::context_deserialize(deserializer, self.context) - } - } - - deserializer.deserialize_seq(ContextVisitor { - context, - _marker: PhantomData, - }) - } -} - -macro_rules! trivial_deserialize { - ($($t:ty),* $(,)?) 
=> { - $( - impl<'de, T> ContextDeserialize<'de, T> for $t { - fn context_deserialize(deserializer: D, _context: T) -> Result - where - D: Deserializer<'de>, - { - <$t>::deserialize(deserializer) - } - } - )* - }; -} - -trivial_deserialize!(bool, u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, f32, f64); diff --git a/consensus/context_deserialize/context_deserialize/src/impls/milhouse.rs b/consensus/context_deserialize/context_deserialize/src/impls/milhouse.rs deleted file mode 100644 index 3b86f067a3e..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/impls/milhouse.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::ContextDeserialize; -use milhouse::{List, Value, Vector}; -use serde::de::Deserializer; -use ssz_types::typenum::Unsigned; - -impl<'de, C, T, N> ContextDeserialize<'de, C> for List -where - T: ContextDeserialize<'de, C> + Value, - N: Unsigned, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>, - { - // First deserialize as a Vec. - // This is not the most efficient implementation as it allocates a temporary Vec. In future - // we could write a more performant implementation using `List::builder()`. - let vec = Vec::::context_deserialize(deserializer, context)?; - - // Then convert to List, which will check the length. 
- List::new(vec) - .map_err(|e| serde::de::Error::custom(format!("Failed to create List: {:?}", e))) - } -} - -impl<'de, C, T, N> ContextDeserialize<'de, C> for Vector -where - T: ContextDeserialize<'de, C> + Value, - N: Unsigned, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>, - { - // First deserialize as a List - let list = List::::context_deserialize(deserializer, context)?; - - // Then convert to Vector, which will check the length - Vector::try_from(list).map_err(|e| { - serde::de::Error::custom(format!("Failed to convert List to Vector: {:?}", e)) - }) - } -} diff --git a/consensus/context_deserialize/context_deserialize/src/impls/mod.rs b/consensus/context_deserialize/context_deserialize/src/impls/mod.rs deleted file mode 100644 index 0225c5e031f..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/impls/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod core; - -#[cfg(feature = "milhouse")] -mod milhouse; - -#[cfg(feature = "ssz")] -mod ssz; diff --git a/consensus/context_deserialize/context_deserialize/src/impls/ssz.rs b/consensus/context_deserialize/context_deserialize/src/impls/ssz.rs deleted file mode 100644 index 26813a96fb7..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/impls/ssz.rs +++ /dev/null @@ -1,51 +0,0 @@ -use crate::ContextDeserialize; -use serde::{ - de::{Deserializer, Error}, - Deserialize, -}; -use ssz_types::{ - length::{Fixed, Variable}, - typenum::Unsigned, - Bitfield, FixedVector, -}; - -impl<'de, C, T, N> ContextDeserialize<'de, C> for FixedVector -where - T: ContextDeserialize<'de, C>, - N: Unsigned, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>, - { - let vec = Vec::::context_deserialize(deserializer, context)?; - FixedVector::new(vec).map_err(|e| D::Error::custom(format!("{:?}", e))) - } -} - -impl<'de, C, N> ContextDeserialize<'de, C> for Bitfield> -where - N: Unsigned 
+ Clone, -{ - fn context_deserialize(deserializer: D, _context: C) -> Result - where - D: Deserializer<'de>, - { - Bitfield::>::deserialize(deserializer) - .map_err(|e| D::Error::custom(format!("{:?}", e))) - } -} - -impl<'de, C, N> ContextDeserialize<'de, C> for Bitfield> -where - N: Unsigned + Clone, -{ - fn context_deserialize(deserializer: D, _context: C) -> Result - where - D: Deserializer<'de>, - { - Bitfield::>::deserialize(deserializer) - .map_err(|e| D::Error::custom(format!("{:?}", e))) - } -} diff --git a/consensus/context_deserialize/context_deserialize/src/lib.rs b/consensus/context_deserialize/context_deserialize/src/lib.rs deleted file mode 100644 index e5f2bfdba38..00000000000 --- a/consensus/context_deserialize/context_deserialize/src/lib.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod impls; - -#[cfg(feature = "derive")] -pub use context_deserialize_derive::context_deserialize; - -use serde::de::Deserializer; - -/// General-purpose deserialization trait that accepts extra context `C`. 
-pub trait ContextDeserialize<'de, C>: Sized { - fn context_deserialize(deserializer: D, context: C) -> Result - where - D: Deserializer<'de>; -} diff --git a/consensus/context_deserialize/context_deserialize_derive/Cargo.toml b/consensus/context_deserialize/context_deserialize_derive/Cargo.toml deleted file mode 100644 index eedae30cdfe..00000000000 --- a/consensus/context_deserialize/context_deserialize_derive/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "context_deserialize_derive" -version = "0.1.0" -edition = "2021" - -[lib] -proc-macro = true - -[dependencies] -quote = { workspace = true } -syn = { workspace = true } - -[dev-dependencies] -context_deserialize = { path = "../context_deserialize" } -serde = { workspace = true } -serde_json = "1.0" diff --git a/consensus/context_deserialize/context_deserialize_derive/src/lib.rs b/consensus/context_deserialize/context_deserialize_derive/src/lib.rs deleted file mode 100644 index 0b73a43b0a4..00000000000 --- a/consensus/context_deserialize/context_deserialize_derive/src/lib.rs +++ /dev/null @@ -1,118 +0,0 @@ -extern crate proc_macro; -extern crate quote; -extern crate syn; - -use proc_macro::TokenStream; -use quote::quote; -use syn::{ - parse_macro_input, AttributeArgs, DeriveInput, GenericParam, LifetimeDef, Meta, NestedMeta, - WhereClause, -}; - -#[proc_macro_attribute] -pub fn context_deserialize(attr: TokenStream, item: TokenStream) -> TokenStream { - let args = parse_macro_input!(attr as AttributeArgs); - let input = parse_macro_input!(item as DeriveInput); - let ident = &input.ident; - - let mut ctx_types = Vec::new(); - let mut explicit_where: Option = None; - - for meta in args { - match meta { - NestedMeta::Meta(Meta::Path(p)) => { - ctx_types.push(p); - } - NestedMeta::Meta(Meta::NameValue(nv)) if nv.path.is_ident("bound") => { - if let syn::Lit::Str(lit_str) = &nv.lit { - let where_string = format!("where {}", lit_str.value()); - match syn::parse_str::(&where_string) { - Ok(where_clause) 
=> { - explicit_where = Some(where_clause); - } - Err(err) => { - return syn::Error::new_spanned( - lit_str, - format!("Invalid where clause '{}': {}", lit_str.value(), err), - ) - .to_compile_error() - .into(); - } - } - } else { - return syn::Error::new_spanned( - &nv, - "Expected a string literal for `bound` value", - ) - .to_compile_error() - .into(); - } - } - _ => { - return syn::Error::new_spanned( - &meta, - "Expected paths or `bound = \"...\"` in #[context_deserialize(...)]", - ) - .to_compile_error() - .into(); - } - } - } - - if ctx_types.is_empty() { - return quote! { - compile_error!("Usage: #[context_deserialize(Type1, Type2, ..., bound = \"...\")]"); - } - .into(); - } - - let original_generics = input.generics.clone(); - - // Clone and clean generics for impl use (remove default params) - let mut impl_generics = input.generics.clone(); - for param in impl_generics.params.iter_mut() { - if let GenericParam::Type(ty) = param { - ty.eq_token = None; - ty.default = None; - } - } - - // Ensure 'de lifetime exists in impl generics - let has_de = impl_generics - .lifetimes() - .any(|LifetimeDef { lifetime, .. }| lifetime.ident == "de"); - - if !has_de { - impl_generics.params.insert(0, syn::parse_quote! { 'de }); - } - - let (_, ty_generics, _) = original_generics.split_for_impl(); - let (impl_gens, _, _) = impl_generics.split_for_impl(); - - // Generate: no `'de` applied to the type name - let mut impls = quote! {}; - for ctx in ctx_types { - impls.extend(quote! { - impl #impl_gens context_deserialize::ContextDeserialize<'de, #ctx> - for #ident #ty_generics - #explicit_where - { - fn context_deserialize( - deserializer: D, - _context: #ctx, - ) -> Result - where - D: serde::de::Deserializer<'de>, - { - ::deserialize(deserializer) - } - } - }); - } - - quote! 
{ - #input - #impls - } - .into() -} diff --git a/consensus/context_deserialize/context_deserialize_derive/tests/context_deserialize_derive.rs b/consensus/context_deserialize/context_deserialize_derive/tests/context_deserialize_derive.rs deleted file mode 100644 index 8fb46da9c65..00000000000 --- a/consensus/context_deserialize/context_deserialize_derive/tests/context_deserialize_derive.rs +++ /dev/null @@ -1,93 +0,0 @@ -use context_deserialize::{context_deserialize, ContextDeserialize}; -use serde::{Deserialize, Serialize}; - -#[test] -fn test_context_deserialize_derive() { - type TestContext = (); - - #[context_deserialize(TestContext)] - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct Test { - field: String, - } - - let test = Test { - field: "test".to_string(), - }; - let serialized = serde_json::to_string(&test).unwrap(); - let deserialized = - Test::context_deserialize(&mut serde_json::Deserializer::from_str(&serialized), ()) - .unwrap(); - assert_eq!(test, deserialized); -} - -#[test] -fn test_context_deserialize_derive_multiple_types() { - #[allow(dead_code)] - struct TestContext1(u64); - #[allow(dead_code)] - struct TestContext2(String); - - // This will derive: - // - ContextDeserialize for Test - // - ContextDeserialize for Test - // by just leveraging the Deserialize impl - #[context_deserialize(TestContext1, TestContext2)] - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct Test { - field: String, - } - - let test = Test { - field: "test".to_string(), - }; - let serialized = serde_json::to_string(&test).unwrap(); - let deserialized = Test::context_deserialize( - &mut serde_json::Deserializer::from_str(&serialized), - TestContext1(1), - ) - .unwrap(); - assert_eq!(test, deserialized); - - let deserialized = Test::context_deserialize( - &mut serde_json::Deserializer::from_str(&serialized), - TestContext2("2".to_string()), - ) - .unwrap(); - - assert_eq!(test, deserialized); -} - -#[test] -fn test_context_deserialize_derive_bound() 
{ - use std::fmt::Debug; - - struct TestContext; - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct Inner { - value: u64, - } - - #[context_deserialize( - TestContext, - bound = "T: Serialize + for<'a> Deserialize<'a> + Debug + PartialEq" - )] - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct Wrapper { - inner: T, - } - - let val = Wrapper { - inner: Inner { value: 42 }, - }; - - let serialized = serde_json::to_string(&val).unwrap(); - let deserialized = Wrapper::::context_deserialize( - &mut serde_json::Deserializer::from_str(&serialized), - TestContext, - ) - .unwrap(); - - assert_eq!(val, deserialized); -} diff --git a/consensus/fork_choice/Cargo.toml b/consensus/fork_choice/Cargo.toml index 0a244c2ba19..a07aa38aa5b 100644 --- a/consensus/fork_choice/Cargo.toml +++ b/consensus/fork_choice/Cargo.toml @@ -8,6 +8,7 @@ edition = { workspace = true } [dependencies] ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } logging = { workspace = true } metrics = { workspace = true } proto_array = { workspace = true } diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index fe1f5fba9e4..9a8cae0c365 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -1,5 +1,6 @@ use crate::metrics::{self, scrape_for_metrics}; use crate::{ForkChoiceStore, InvalidationOperation}; +use fixed_bytes::FixedBytesExtended; use logging::crit; use proto_array::{ Block as ProtoBlock, DisallowedReOrgOffsets, ExecutionStatus, JustifiedBalances, @@ -19,7 +20,7 @@ use tracing::{debug, instrument, warn}; use types::{ AbstractExecPayload, AttestationShufflingId, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, ChainSpec, Checkpoint, Epoch, EthSpec, ExecPayload, ExecutionBlockHash, - FixedBytesExtended, Hash256, IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, + Hash256, 
IndexedAttestationRef, RelativeEpoch, SignedBeaconBlock, Slot, consts::bellatrix::INTERVALS_PER_SLOT, }; @@ -627,7 +628,7 @@ where op: &InvalidationOperation, ) -> Result<(), Error> { self.proto_array - .process_execution_payload_invalidation::(op) + .process_execution_payload_invalidation::(op, self.finalized_checkpoint()) .map_err(Error::FailedToProcessInvalidExecutionPayload) } @@ -908,6 +909,8 @@ where unrealized_finalized_checkpoint: Some(unrealized_finalized_checkpoint), }, current_slot, + self.justified_checkpoint(), + self.finalized_checkpoint(), )?; Ok(()) @@ -1288,7 +1291,7 @@ where /// Return `true` if `block_root` is equal to the finalized checkpoint, or a known descendant of it. pub fn is_finalized_checkpoint_or_descendant(&self, block_root: Hash256) -> bool { self.proto_array - .is_finalized_checkpoint_or_descendant::(block_root) + .is_finalized_checkpoint_or_descendant::(block_root, self.finalized_checkpoint()) } pub fn is_descendant(&self, ancestor_root: Hash256, descendant_root: Hash256) -> bool { @@ -1508,7 +1511,9 @@ where /// be instantiated again later. 
pub fn to_persisted(&self) -> PersistedForkChoice { PersistedForkChoice { - proto_array: self.proto_array().as_ssz_container(), + proto_array: self + .proto_array() + .as_ssz_container(self.justified_checkpoint(), self.finalized_checkpoint()), queued_attestations: self.queued_attestations().to_vec(), } } diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 25c3f03d3b9..d3a84ee85be 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -7,6 +7,7 @@ use beacon_chain::{ BeaconChain, BeaconChainError, BeaconForkChoiceStore, ChainConfig, ForkChoiceError, StateSkipConfig, WhenSlotSkipped, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; @@ -17,9 +18,9 @@ use std::time::Duration; use store::MemoryStore; use types::SingleAttestation; use types::{ - BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, - ForkName, Hash256, IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, - SubnetId, test_utils::generate_deterministic_keypair, + BeaconBlockRef, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, ForkName, Hash256, + IndexedAttestation, MainnetEthSpec, RelativeEpoch, SignedBeaconBlock, Slot, SubnetId, + test_utils::generate_deterministic_keypair, }; pub type E = MainnetEthSpec; @@ -753,10 +754,10 @@ async fn invalid_attestation_empty_bitfield() { MutationDelay::NoDelay, |attestation, _| match attestation { IndexedAttestation::Base(att) => { - att.attesting_indices = vec![].into(); + att.attesting_indices = vec![].try_into().unwrap(); } IndexedAttestation::Electra(att) => { - att.attesting_indices = vec![].into(); + att.attesting_indices = vec![].try_into().unwrap(); } }, |result| { diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index d750c054060..5ba8a1b949e 100644 --- 
a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -14,5 +14,4 @@ fixed_bytes = { workspace = true } safe_arith = { workspace = true } [dev-dependencies] -quickcheck = { workspace = true } -quickcheck_macros = { workspace = true } +proptest = { workspace = true } diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index bf075ec15a5..494c73d05ce 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -413,50 +413,70 @@ impl From for MerkleTreeError { #[cfg(test)] mod tests { use super::*; - use quickcheck::TestResult; - use quickcheck_macros::quickcheck; - - /// Check that we can: - /// 1. Build a MerkleTree from arbitrary leaves and an arbitrary depth. - /// 2. Generate valid proofs for all of the leaves of this MerkleTree. - #[quickcheck] - fn quickcheck_create_and_verify(int_leaves: Vec, depth: usize) -> TestResult { - if depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { - return TestResult::discard(); - } - let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); - let merkle_tree = MerkleTree::create(&leaves, depth); - let merkle_root = merkle_tree.hash(); + use proptest::prelude::*; + + // Limit test depth to avoid generating huge trees. Depth 10 = 1024 max leaves. 
+ const TEST_MAX_DEPTH: usize = 10; - let proofs_ok = (0..leaves.len()).all(|i| { - let (leaf, branch) = merkle_tree - .generate_proof(i, depth) - .expect("should generate proof"); - leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) - }); + fn merkle_leaves_strategy(max_depth: usize) -> impl Strategy, usize)> { + (0..=max_depth).prop_flat_map(|depth| { + let max_leaves = 2usize.pow(depth as u32); + ( + proptest::collection::vec(any::(), 0..=max_leaves), + Just(depth), + ) + }) + } - TestResult::from_bool(proofs_ok) + fn merkle_leaves_strategy_min_depth( + max_depth: usize, + min_depth: usize, + ) -> impl Strategy, usize)> { + (min_depth..=max_depth).prop_flat_map(|depth| { + let max_leaves = 2usize.pow(depth as u32); + ( + proptest::collection::vec(any::(), 0..=max_leaves), + Just(depth), + ) + }) } - #[quickcheck] - fn quickcheck_push_leaf_and_verify(int_leaves: Vec, depth: usize) -> TestResult { - if depth == 0 || depth > MAX_TREE_DEPTH || int_leaves.len() > 2usize.pow(depth as u32) { - return TestResult::discard(); + proptest::proptest! { + /// Check that we can: + /// 1. Build a MerkleTree from arbitrary leaves and an arbitrary depth. + /// 2. Generate valid proofs for all of the leaves of this MerkleTree. 
+ #[test] + fn proptest_create_and_verify((int_leaves, depth) in merkle_leaves_strategy(TEST_MAX_DEPTH)) { + let leaves: Vec<_> = int_leaves.into_iter().map(H256::from_low_u64_be).collect(); + let merkle_tree = MerkleTree::create(&leaves, depth); + let merkle_root = merkle_tree.hash(); + + let proofs_ok = (0..leaves.len()).all(|i| { + let (leaf, branch) = merkle_tree + .generate_proof(i, depth) + .expect("should generate proof"); + leaf == leaves[i] && verify_merkle_proof(leaf, &branch, depth, i, merkle_root) + }); + + proptest::prop_assert!(proofs_ok); } - let leaves_iter = int_leaves.into_iter().map(H256::from_low_u64_be); - let mut merkle_tree = MerkleTree::create(&[], depth); + #[test] + fn proptest_push_leaf_and_verify((int_leaves, depth) in merkle_leaves_strategy_min_depth(TEST_MAX_DEPTH, 1)) { + let leaves_iter = int_leaves.into_iter().map(H256::from_low_u64_be); + let mut merkle_tree = MerkleTree::create(&[], depth); - let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| { - assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(())); - let (stored_leaf, branch) = merkle_tree - .generate_proof(i, depth) - .expect("should generate proof"); - stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash()) - }); + let proofs_ok = leaves_iter.enumerate().all(|(i, leaf)| { + assert_eq!(merkle_tree.push_leaf(leaf, depth), Ok(())); + let (stored_leaf, branch) = merkle_tree + .generate_proof(i, depth) + .expect("should generate proof"); + stored_leaf == leaf && verify_merkle_proof(leaf, &branch, depth, i, merkle_tree.hash()) + }); - TestResult::from_bool(proofs_ok) + proptest::prop_assert!(proofs_ok); + } } #[test] diff --git a/consensus/proto_array/Cargo.toml b/consensus/proto_array/Cargo.toml index bd6757c0fad..782610e0d35 100644 --- a/consensus/proto_array/Cargo.toml +++ b/consensus/proto_array/Cargo.toml @@ -11,6 +11,7 @@ path = "src/bin.rs" [dependencies] ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } 
+fixed_bytes = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true } serde_yaml = { workspace = true } diff --git a/consensus/proto_array/src/fork_choice_test_definition.rs b/consensus/proto_array/src/fork_choice_test_definition.rs index 20987dff26d..e9deb6759fc 100644 --- a/consensus/proto_array/src/fork_choice_test_definition.rs +++ b/consensus/proto_array/src/fork_choice_test_definition.rs @@ -5,11 +5,12 @@ mod votes; use crate::proto_array_fork_choice::{Block, ExecutionStatus, ProtoArrayForkChoice}; use crate::{InvalidationOperation, JustifiedBalances}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use types::{ - AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, FixedBytesExtended, - Hash256, MainnetEthSpec, Slot, + AttestationShufflingId, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + MainnetEthSpec, Slot, }; pub use execution_status::*; @@ -212,7 +213,12 @@ impl ForkChoiceTestDefinition { unrealized_finalized_checkpoint: None, }; fork_choice - .process_block::(block, slot) + .process_block::( + block, + slot, + self.justified_checkpoint, + self.finalized_checkpoint, + ) .unwrap_or_else(|e| { panic!( "process_block op at index {} returned error: {:?}", @@ -272,7 +278,10 @@ impl ForkChoiceTestDefinition { } }; fork_choice - .process_execution_payload_invalidation::(&op) + .process_execution_payload_invalidation::( + &op, + self.finalized_checkpoint, + ) .unwrap() } Operation::AssertWeight { block_root, weight } => assert_eq!( @@ -305,7 +314,8 @@ fn get_checkpoint(i: u64) -> Checkpoint { } fn check_bytes_round_trip(original: &ProtoArrayForkChoice) { - let bytes = original.as_bytes(); + // The checkpoint are ignored `ProtoArrayForkChoice::from_bytes` so any value is ok + let bytes = original.as_bytes(Checkpoint::default(), Checkpoint::default()); let decoded = ProtoArrayForkChoice::from_bytes(&bytes, original.balances.clone()) .expect("fork 
choice should decode from bytes"); assert!( diff --git a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs index de84fbdd128..d20eaacb99a 100644 --- a/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs +++ b/consensus/proto_array/src/fork_choice_test_definition/no_votes.rs @@ -1,4 +1,4 @@ -use types::FixedBytesExtended; +use fixed_bytes::FixedBytesExtended; use super::*; diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 18af2dfc24c..5bfcdae463d 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -1,5 +1,6 @@ use crate::error::InvalidBestNodeInfo; use crate::{Block, ExecutionStatus, JustifiedBalances, error::Error}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz::four_byte_option_impl; @@ -7,8 +8,8 @@ use ssz_derive::{Decode, Encode}; use std::collections::{HashMap, HashSet}; use superstruct::superstruct; use types::{ - AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - FixedBytesExtended, Hash256, Slot, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, }; // Define a "legacy" implementation of `Option` which uses four bytes for encoding the union @@ -130,8 +131,6 @@ pub struct ProtoArray { /// Do not attempt to prune the tree unless it has at least this many nodes. Small prunes /// simply waste time. 
pub prune_threshold: usize, - pub justified_checkpoint: Checkpoint, - pub finalized_checkpoint: Checkpoint, pub nodes: Vec, pub indices: HashMap, pub previous_proposer_boost: ProposerBoost, @@ -155,8 +154,8 @@ impl ProtoArray { pub fn apply_score_changes( &mut self, mut deltas: Vec, - justified_checkpoint: Checkpoint, - finalized_checkpoint: Checkpoint, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, new_justified_balances: &JustifiedBalances, proposer_boost_root: Hash256, current_slot: Slot, @@ -169,13 +168,6 @@ impl ProtoArray { }); } - if justified_checkpoint != self.justified_checkpoint - || finalized_checkpoint != self.finalized_checkpoint - { - self.justified_checkpoint = justified_checkpoint; - self.finalized_checkpoint = finalized_checkpoint; - } - // Default the proposer boost score to zero. let mut proposer_score = 0; @@ -296,6 +288,8 @@ impl ProtoArray { parent_index, node_index, current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, )?; } } @@ -306,7 +300,13 @@ impl ProtoArray { /// Register a block with the fork choice. /// /// It is only sane to supply a `None` parent for the genesis block. - pub fn on_block(&mut self, block: Block, current_slot: Slot) -> Result<(), Error> { + pub fn on_block( + &mut self, + block: Block, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + ) -> Result<(), Error> { // If the block is already known, simply ignore it. 
if self.indices.contains_key(&block.root) { return Ok(()); @@ -357,6 +357,8 @@ impl ProtoArray { parent_index, node_index, current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, )?; if matches!(block.execution_status, ExecutionStatus::Valid(_)) { @@ -439,6 +441,7 @@ impl ProtoArray { pub fn propagate_execution_payload_invalidation( &mut self, op: &InvalidationOperation, + best_finalized_checkpoint: Checkpoint, ) -> Result<(), Error> { let mut invalidated_indices: HashSet = <_>::default(); let head_block_root = op.block_root(); @@ -467,7 +470,10 @@ impl ProtoArray { let latest_valid_ancestor_is_descendant = latest_valid_ancestor_root.is_some_and(|ancestor_root| { self.is_descendant(ancestor_root, head_block_root) - && self.is_finalized_checkpoint_or_descendant::(ancestor_root) + && self.is_finalized_checkpoint_or_descendant::( + ancestor_root, + best_finalized_checkpoint, + ) }); // Collect all *ancestors* which were declared invalid since they reside between the @@ -630,6 +636,8 @@ impl ProtoArray { &self, justified_root: &Hash256, current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, ) -> Result { let justified_index = self .indices @@ -663,12 +671,17 @@ impl ProtoArray { .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; // Perform a sanity check that the node is indeed valid to be the head. 
- if !self.node_is_viable_for_head::(best_node, current_slot) { + if !self.node_is_viable_for_head::( + best_node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ) { return Err(Error::InvalidBestNode(Box::new(InvalidBestNodeInfo { current_slot, start_root: *justified_root, - justified_checkpoint: self.justified_checkpoint, - finalized_checkpoint: self.finalized_checkpoint, + justified_checkpoint: best_justified_checkpoint, + finalized_checkpoint: best_finalized_checkpoint, head_root: best_node.root, head_justified_checkpoint: best_node.justified_checkpoint, head_finalized_checkpoint: best_node.finalized_checkpoint, @@ -765,6 +778,8 @@ impl ProtoArray { parent_index: usize, child_index: usize, current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, ) -> Result<(), Error> { let child = self .nodes @@ -776,8 +791,12 @@ impl ProtoArray { .get(parent_index) .ok_or(Error::InvalidNodeIndex(parent_index))?; - let child_leads_to_viable_head = - self.node_leads_to_viable_head::(child, current_slot)?; + let child_leads_to_viable_head = self.node_leads_to_viable_head::( + child, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )?; // These three variables are aliases to the three options that we may set the // `parent.best_child` and `parent.best_descendant` to. @@ -806,8 +825,12 @@ impl ProtoArray { .get(best_child_index) .ok_or(Error::InvalidBestDescendant(best_child_index))?; - let best_child_leads_to_viable_head = - self.node_leads_to_viable_head::(best_child, current_slot)?; + let best_child_leads_to_viable_head = self.node_leads_to_viable_head::( + best_child, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )?; if child_leads_to_viable_head && !best_child_leads_to_viable_head { // The child leads to a viable head, but the current best-child doesn't. 
@@ -856,6 +879,8 @@ impl ProtoArray { &self, node: &ProtoNode, current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, ) -> Result { let best_descendant_is_viable_for_head = if let Some(best_descendant_index) = node.best_descendant { @@ -864,13 +889,23 @@ impl ProtoArray { .get(best_descendant_index) .ok_or(Error::InvalidBestDescendant(best_descendant_index))?; - self.node_is_viable_for_head::(best_descendant, current_slot) + self.node_is_viable_for_head::( + best_descendant, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + ) } else { false }; Ok(best_descendant_is_viable_for_head - || self.node_is_viable_for_head::(node, current_slot)) + || self.node_is_viable_for_head::( + node, + current_slot, + best_justified_checkpoint, + best_finalized_checkpoint, + )) } /// This is the equivalent to the `filter_block_tree` function in the eth2 spec: @@ -879,7 +914,13 @@ impl ProtoArray { /// /// Any node that has a different finalized or justified epoch should not be viable for the /// head. 
- fn node_is_viable_for_head(&self, node: &ProtoNode, current_slot: Slot) -> bool { + fn node_is_viable_for_head( + &self, + node: &ProtoNode, + current_slot: Slot, + best_justified_checkpoint: Checkpoint, + best_finalized_checkpoint: Checkpoint, + ) -> bool { if node.execution_status.is_invalid() { return false; } @@ -901,12 +942,13 @@ impl ProtoArray { node_justified_checkpoint }; - let correct_justified = self.justified_checkpoint.epoch == genesis_epoch - || voting_source.epoch == self.justified_checkpoint.epoch + let correct_justified = best_justified_checkpoint.epoch == genesis_epoch + || voting_source.epoch == best_justified_checkpoint.epoch || voting_source.epoch + 2 >= current_epoch; - let correct_finalized = self.finalized_checkpoint.epoch == genesis_epoch - || self.is_finalized_checkpoint_or_descendant::(node.root); + let correct_finalized = best_finalized_checkpoint.epoch == genesis_epoch + || self + .is_finalized_checkpoint_or_descendant::(node.root, best_finalized_checkpoint); correct_justified && correct_finalized } @@ -961,10 +1003,13 @@ impl ProtoArray { /// /// Notably, this function is checking ancestory of the finalized /// *checkpoint* not the finalized *block*. - pub fn is_finalized_checkpoint_or_descendant(&self, root: Hash256) -> bool { - let finalized_root = self.finalized_checkpoint.root; - let finalized_slot = self - .finalized_checkpoint + pub fn is_finalized_checkpoint_or_descendant( + &self, + root: Hash256, + best_finalized_checkpoint: Checkpoint, + ) -> bool { + let finalized_root = best_finalized_checkpoint.root; + let finalized_slot = best_finalized_checkpoint .epoch .start_slot(E::slots_per_epoch()); @@ -987,7 +1032,7 @@ impl ProtoArray { // If the conditions don't match for this node then they're unlikely to // start matching for its ancestors. 
for checkpoint in &[node.finalized_checkpoint, node.justified_checkpoint] { - if checkpoint == &self.finalized_checkpoint { + if checkpoint == &best_finalized_checkpoint { return true; } } @@ -996,7 +1041,7 @@ impl ProtoArray { node.unrealized_finalized_checkpoint, node.unrealized_justified_checkpoint, ] { - if checkpoint.is_some_and(|cp| cp == self.finalized_checkpoint) { + if checkpoint.is_some_and(|cp| cp == best_finalized_checkpoint) { return true; } } @@ -1044,12 +1089,18 @@ impl ProtoArray { /// For informational purposes like the beacon HTTP API, we use this as the list of known heads, /// even though some of them might not be viable. We do this to maintain consistency between the /// definition of "head" used by pruning (which does not consider viability) and fork choice. - pub fn heads_descended_from_finalization(&self) -> Vec<&ProtoNode> { + pub fn heads_descended_from_finalization( + &self, + best_finalized_checkpoint: Checkpoint, + ) -> Vec<&ProtoNode> { self.nodes .iter() .filter(|node| { node.best_child.is_none() - && self.is_finalized_checkpoint_or_descendant::(node.root) + && self.is_finalized_checkpoint_or_descendant::( + node.root, + best_finalized_checkpoint, + ) }) .collect() } diff --git a/consensus/proto_array/src/proto_array_fork_choice.rs b/consensus/proto_array/src/proto_array_fork_choice.rs index dea853d245d..3edf1e0644d 100644 --- a/consensus/proto_array/src/proto_array_fork_choice.rs +++ b/consensus/proto_array/src/proto_array_fork_choice.rs @@ -7,6 +7,7 @@ use crate::{ }, ssz_container::SszContainer, }; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -15,8 +16,8 @@ use std::{ fmt, }; use types::{ - AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, - FixedBytesExtended, Hash256, Slot, + AttestationShufflingId, ChainSpec, Checkpoint, Epoch, EthSpec, ExecutionBlockHash, Hash256, + Slot, }; pub const 
DEFAULT_PRUNE_THRESHOLD: usize = 256; @@ -424,8 +425,6 @@ impl ProtoArrayForkChoice { ) -> Result { let mut proto_array = ProtoArray { prune_threshold: DEFAULT_PRUNE_THRESHOLD, - justified_checkpoint, - finalized_checkpoint, nodes: Vec::with_capacity(1), indices: HashMap::with_capacity(1), previous_proposer_boost: ProposerBoost::default(), @@ -449,7 +448,12 @@ impl ProtoArrayForkChoice { }; proto_array - .on_block::(block, current_slot) + .on_block::( + block, + current_slot, + justified_checkpoint, + finalized_checkpoint, + ) .map_err(|e| format!("Failed to add finalized block to proto_array: {:?}", e))?; Ok(Self { @@ -473,9 +477,10 @@ impl ProtoArrayForkChoice { pub fn process_execution_payload_invalidation( &mut self, op: &InvalidationOperation, + finalized_checkpoint: Checkpoint, ) -> Result<(), String> { self.proto_array - .propagate_execution_payload_invalidation::(op) + .propagate_execution_payload_invalidation::(op, finalized_checkpoint) .map_err(|e| format!("Failed to process invalid payload: {:?}", e)) } @@ -499,13 +504,20 @@ impl ProtoArrayForkChoice { &mut self, block: Block, current_slot: Slot, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, ) -> Result<(), String> { if block.parent_root.is_none() { return Err("Missing parent root".to_string()); } self.proto_array - .on_block::(block, current_slot) + .on_block::( + block, + current_slot, + justified_checkpoint, + finalized_checkpoint, + ) .map_err(|e| format!("process_block_error: {:?}", e)) } @@ -547,7 +559,12 @@ impl ProtoArrayForkChoice { *old_balances = new_balances.clone(); self.proto_array - .find_head::(&justified_checkpoint.root, current_slot) + .find_head::( + &justified_checkpoint.root, + current_slot, + justified_checkpoint, + finalized_checkpoint, + ) .map_err(|e| format!("find_head failed: {:?}", e)) } @@ -884,9 +901,10 @@ impl ProtoArrayForkChoice { pub fn is_finalized_checkpoint_or_descendant( &self, descendant_root: Hash256, + best_finalized_checkpoint: 
Checkpoint, ) -> bool { self.proto_array - .is_finalized_checkpoint_or_descendant::(descendant_root) + .is_finalized_checkpoint_or_descendant::(descendant_root, best_finalized_checkpoint) } pub fn latest_message(&self, validator_index: usize) -> Option<(Hash256, Epoch)> { @@ -916,12 +934,21 @@ impl ProtoArrayForkChoice { self.proto_array.iter_block_roots(block_root) } - pub fn as_ssz_container(&self) -> SszContainer { - SszContainer::from(self) + pub fn as_ssz_container( + &self, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + ) -> SszContainer { + SszContainer::from_proto_array(self, justified_checkpoint, finalized_checkpoint) } - pub fn as_bytes(&self) -> Vec { - SszContainer::from(self).as_ssz_bytes() + pub fn as_bytes( + &self, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + ) -> Vec { + self.as_ssz_container(justified_checkpoint, finalized_checkpoint) + .as_ssz_bytes() } pub fn from_bytes(bytes: &[u8], balances: JustifiedBalances) -> Result { @@ -954,8 +981,12 @@ impl ProtoArrayForkChoice { } /// Returns all nodes that have zero children and are descended from the finalized checkpoint. - pub fn heads_descended_from_finalization(&self) -> Vec<&ProtoNode> { - self.proto_array.heads_descended_from_finalization::() + pub fn heads_descended_from_finalization( + &self, + best_finalized_checkpoint: Checkpoint, + ) -> Vec<&ProtoNode> { + self.proto_array + .heads_descended_from_finalization::(best_finalized_checkpoint) } } @@ -1065,7 +1096,8 @@ fn compute_deltas( #[cfg(test)] mod test_compute_deltas { use super::*; - use types::{FixedBytesExtended, MainnetEthSpec}; + use fixed_bytes::FixedBytesExtended; + use types::MainnetEthSpec; /// Gives a hash that is not the zero hash (unless i is `usize::MAX)`. 
fn hash_from_index(i: usize) -> Hash256 { @@ -1125,6 +1157,8 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: Some(genesis_checkpoint), }, genesis_slot + 1, + genesis_checkpoint, + genesis_checkpoint, ) .unwrap(); @@ -1148,6 +1182,8 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: None, }, genesis_slot + 1, + genesis_checkpoint, + genesis_checkpoint, ) .unwrap(); @@ -1161,10 +1197,24 @@ mod test_compute_deltas { assert!(!fc.is_descendant(finalized_root, not_finalized_desc)); assert!(!fc.is_descendant(finalized_root, unknown)); - assert!(fc.is_finalized_checkpoint_or_descendant::(finalized_root)); - assert!(fc.is_finalized_checkpoint_or_descendant::(finalized_desc)); - assert!(!fc.is_finalized_checkpoint_or_descendant::(not_finalized_desc)); - assert!(!fc.is_finalized_checkpoint_or_descendant::(unknown)); + assert!(fc.is_finalized_checkpoint_or_descendant::( + finalized_root, + genesis_checkpoint + )); + assert!(fc.is_finalized_checkpoint_or_descendant::( + finalized_desc, + genesis_checkpoint + )); + assert!(!fc.is_finalized_checkpoint_or_descendant::( + not_finalized_desc, + genesis_checkpoint + )); + assert!( + !fc.is_finalized_checkpoint_or_descendant::( + unknown, + genesis_checkpoint + ) + ); assert!(!fc.is_descendant(finalized_desc, not_finalized_desc)); assert!(fc.is_descendant(finalized_desc, finalized_desc)); @@ -1260,6 +1310,8 @@ mod test_compute_deltas { unrealized_finalized_checkpoint: Some(genesis_checkpoint), }, Slot::from(block.slot), + genesis_checkpoint, + genesis_checkpoint, ) .unwrap(); }; @@ -1314,29 +1366,34 @@ mod test_compute_deltas { // Set the finalized checkpoint to finalize the first slot of epoch 1 on // the canonical chain. 
- fc.proto_array.finalized_checkpoint = Checkpoint { + let finalized_checkpoint = Checkpoint { root: finalized_root, epoch: Epoch::new(1), }; assert!( fc.proto_array - .is_finalized_checkpoint_or_descendant::(finalized_root), + .is_finalized_checkpoint_or_descendant::( + finalized_root, + finalized_checkpoint + ), "the finalized checkpoint is the finalized checkpoint" ); assert!( fc.proto_array - .is_finalized_checkpoint_or_descendant::(get_block_root( - canonical_slot - )), + .is_finalized_checkpoint_or_descendant::( + get_block_root(canonical_slot), + finalized_checkpoint + ), "the canonical block is a descendant of the finalized checkpoint" ); assert!( !fc.proto_array - .is_finalized_checkpoint_or_descendant::(get_block_root( - non_canonical_slot - )), + .is_finalized_checkpoint_or_descendant::( + get_block_root(non_canonical_slot), + finalized_checkpoint + ), "although the non-canonical block is a descendant of the finalized block, \ it's not a descendant of the finalized checkpoint" ); diff --git a/consensus/proto_array/src/ssz_container.rs b/consensus/proto_array/src/ssz_container.rs index 0bb3f2b35d8..1e01b74c8cd 100644 --- a/consensus/proto_array/src/ssz_container.rs +++ b/consensus/proto_array/src/ssz_container.rs @@ -26,22 +26,28 @@ pub struct SszContainer { #[superstruct(only(V17))] pub balances: Vec, pub prune_threshold: usize, - pub justified_checkpoint: Checkpoint, - pub finalized_checkpoint: Checkpoint, + // Deprecated, remove in a future schema migration + justified_checkpoint: Checkpoint, + // Deprecated, remove in a future schema migration + finalized_checkpoint: Checkpoint, pub nodes: Vec, pub indices: Vec<(Hash256, usize)>, pub previous_proposer_boost: ProposerBoost, } -impl From<&ProtoArrayForkChoice> for SszContainer { - fn from(from: &ProtoArrayForkChoice) -> Self { +impl SszContainer { + pub fn from_proto_array( + from: &ProtoArrayForkChoice, + justified_checkpoint: Checkpoint, + finalized_checkpoint: Checkpoint, + ) -> Self { let 
proto_array = &from.proto_array; Self { votes: from.votes.0.clone(), prune_threshold: proto_array.prune_threshold, - justified_checkpoint: proto_array.justified_checkpoint, - finalized_checkpoint: proto_array.finalized_checkpoint, + justified_checkpoint, + finalized_checkpoint, nodes: proto_array.nodes.clone(), indices: proto_array.indices.iter().map(|(k, v)| (*k, *v)).collect(), previous_proposer_boost: proto_array.previous_proposer_boost, @@ -55,8 +61,6 @@ impl TryFrom<(SszContainer, JustifiedBalances)> for ProtoArrayForkChoice { fn try_from((from, balances): (SszContainer, JustifiedBalances)) -> Result { let proto_array = ProtoArray { prune_threshold: from.prune_threshold, - justified_checkpoint: from.justified_checkpoint, - finalized_checkpoint: from.finalized_checkpoint, nodes: from.nodes, indices: from.indices.into_iter().collect::>(), previous_proposer_boost: from.previous_proposer_boost, diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index fc55bde9c67..a08035d5838 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -20,15 +20,17 @@ portable = ["bls/supranational-portable"] [dependencies] arbitrary = { workspace = true } bls = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } int_to_bytes = { workspace = true } integer-sqrt = "0.1.5" itertools = { workspace = true } merkle_proof = { workspace = true } metrics = { workspace = true } +milhouse = { workspace = true } rand = { workspace = true } rayon = { workspace = true } safe_arith = { workspace = true } @@ -37,6 +39,7 @@ ssz_types = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } tracing = { workspace = true } tree_hash = { workspace = true } +typenum = { workspace = true } types = { workspace 
= true } [dev-dependencies] diff --git a/consensus/state_processing/src/common/get_attesting_indices.rs b/consensus/state_processing/src/common/get_attesting_indices.rs index e4f5aa3c8bc..dc7be7c2515 100644 --- a/consensus/state_processing/src/common/get_attesting_indices.rs +++ b/consensus/state_processing/src/common/get_attesting_indices.rs @@ -2,6 +2,7 @@ use types::*; pub mod attesting_indices_base { use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; + use ssz_types::{BitList, VariableList}; use types::*; /// Convert `attestation` to (almost) indexed-verifiable form. @@ -44,10 +45,10 @@ pub mod attesting_indices_base { } pub mod attesting_indices_electra { - use std::collections::HashSet; - use crate::per_block_processing::errors::{AttestationInvalid as Invalid, BlockOperationError}; use safe_arith::SafeArith; + use ssz_types::{BitList, BitVector, VariableList}; + use std::collections::HashSet; use types::*; /// Compute an Electra IndexedAttestation given a list of committees. 
diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 52f360849e0..01c1855fb10 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -6,6 +6,7 @@ use crate::{ }; use safe_arith::SafeArith; use std::cmp; +use typenum::Unsigned; use types::{ consts::altair::{PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, *, diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs index 86db037446b..ee03596d098 100644 --- a/consensus/state_processing/src/epoch_cache.rs +++ b/consensus/state_processing/src/epoch_cache.rs @@ -2,12 +2,11 @@ use crate::common::altair::BaseRewardPerIncrement; use crate::common::base::SqrtTotalActiveBalance; use crate::common::{altair, base}; use crate::metrics; +use fixed_bytes::FixedBytesExtended; use safe_arith::SafeArith; use tracing::instrument; use types::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -use types::{ - ActivationQueue, BeaconState, ChainSpec, EthSpec, FixedBytesExtended, ForkName, Hash256, -}; +use types::{ActivationQueue, BeaconState, ChainSpec, EthSpec, ForkName, Hash256}; /// Precursor to an `EpochCache`. pub struct PreEpochCache { diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 88ef79310dc..1575fce22f2 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -7,6 +7,7 @@ use crate::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, upgrade_to_fulu, upgrade_to_gloas, }; +use fixed_bytes::FixedBytesExtended; use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; use tree_hash::TreeHash; @@ -167,9 +168,8 @@ pub fn initialize_beacon_state_from_eth1( state.fork_mut().previous_version = spec.gloas_fork_version; // Override latest execution payload header. 
- if let Some(ExecutionPayloadHeader::Gloas(header)) = execution_payload_header { - *state.latest_execution_payload_header_gloas_mut()? = header.clone(); - } + // Here's where we *would* clone the header but there is no header here so.. + // TODO(EIP7732): check this } // Now that we have our validators, initialize the caches (including the committees) diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 99abbef9c1e..07149ff2ee8 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -5,6 +5,7 @@ use safe_arith::{ArithError, SafeArith, SafeArithIter}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; use std::borrow::Cow; use tree_hash::TreeHash; +use typenum::Unsigned; use types::*; pub use self::verify_attester_slashing::{ @@ -40,7 +41,6 @@ mod verify_exit; mod verify_proposer_slashing; use crate::common::decrease_balance; - use crate::common::update_progressive_balances_cache::{ initialize_progressive_balances_cache, update_progressive_balances_metrics, }; @@ -172,10 +172,14 @@ pub fn per_block_processing>( // previous block. 
if is_execution_enabled(state, block.body()) { let body = block.body(); + // TODO(EIP-7732): build out process_withdrawals variant for gloas process_withdrawals::(state, body.execution_payload()?, spec)?; process_execution_payload::(state, body, spec)?; } + // TODO(EIP-7732): build out process_execution_bid + // process_execution_bid(state, block, verify_signatures, spec)?; + process_randao(state, block, verify_randao, ctxt, spec)?; process_eth1_data(state, block.body().eth1_data())?; process_operations(state, block.body(), verify_signatures, ctxt, spec)?; @@ -452,12 +456,6 @@ pub fn process_execution_payload>( _ => return Err(BlockProcessingError::IncorrectStateType), } } - ExecutionPayloadHeaderRefMut::Gloas(header_mut) => { - match payload.to_execution_payload_header() { - ExecutionPayloadHeader::Gloas(header) => *header_mut = header, - _ => return Err(BlockProcessingError::IncorrectStateType), - } - } } Ok(()) @@ -469,6 +467,7 @@ pub fn process_execution_payload>( /// repeatedly write code to treat these errors as false. /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { + // TODO(EIP7732): check this cause potuz modified this function for god knows what reason if state.fork_name_unchecked().capella_enabled() { true } else if state.fork_name_unchecked().bellatrix_enabled() { @@ -628,10 +627,16 @@ pub fn get_expected_withdrawals( .safe_rem(state.validators().len() as u64)?; } - Ok((withdrawals.into(), processed_partial_withdrawals_count)) + Ok(( + withdrawals + .try_into() + .map_err(BlockProcessingError::SszTypesError)?, + processed_partial_withdrawals_count, + )) } /// Apply withdrawals to the state. 
+/// TODO(EIP-7732): abstract this out and create gloas variant pub fn process_withdrawals>( state: &mut BeaconState, payload: Payload::Ref<'_>, diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 1219c7df442..8cc9de42db0 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -1,12 +1,12 @@ use crate::common::{altair::BaseRewardPerIncrement, decrease_balance, increase_balance}; use crate::per_block_processing::errors::{BlockProcessingError, SyncAggregateInvalid}; use crate::{VerifySignatures, signature_sets::sync_aggregate_signature_set}; +use bls::PublicKeyBytes; use safe_arith::SafeArith; use std::borrow::Cow; +use typenum::Unsigned; use types::consts::altair::{PROPOSER_WEIGHT, SYNC_REWARD_WEIGHT, WEIGHT_DENOMINATOR}; -use types::{ - BeaconState, BeaconStateError, ChainSpec, EthSpec, PublicKeyBytes, SyncAggregate, Unsigned, -}; +use types::{BeaconState, BeaconStateError, ChainSpec, EthSpec, SyncAggregate}; pub fn process_sync_aggregate( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 9a1c6c2f6ad..8afeeb685bc 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -5,8 +5,9 @@ use crate::common::{ slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; +use ssz_types::FixedVector; +use typenum::U33; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; -use types::typenum::U33; pub fn process_operations>( state: &mut BeaconState, diff --git 
a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index dafd0d79ea9..0e936007eec 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -2,17 +2,18 @@ //! validated individually, or alongside in others in a potentially cheaper bulk operation. //! //! This module exposes one function to extract each type of `SignatureSet` from a `BeaconBlock`. -use bls::SignatureSet; +use bls::{AggregateSignature, PublicKey, PublicKeyBytes, Signature, SignatureSet}; use ssz::DecodeError; use std::borrow::Cow; use tree_hash::TreeHash; +use typenum::Unsigned; use types::{ - AbstractExecPayload, AggregateSignature, AttesterSlashingRef, BeaconBlockRef, BeaconState, - BeaconStateError, ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, - InconsistentFork, IndexedAttestation, IndexedAttestationRef, ProposerSlashing, PublicKey, - PublicKeyBytes, Signature, SignedAggregateAndProof, SignedBeaconBlock, SignedBeaconBlockHeader, - SignedBlsToExecutionChange, SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, - SigningData, Slot, SyncAggregate, SyncAggregatorSelectionData, Unsigned, + AbstractExecPayload, AttesterSlashingRef, BeaconBlockRef, BeaconState, BeaconStateError, + ChainSpec, DepositData, Domain, Epoch, EthSpec, Fork, Hash256, InconsistentFork, + IndexedAttestation, IndexedAttestationRef, ProposerSlashing, SignedAggregateAndProof, + SignedBeaconBlock, SignedBeaconBlockHeader, SignedBlsToExecutionChange, + SignedContributionAndProof, SignedRoot, SignedVoluntaryExit, SigningData, Slot, SyncAggregate, + SyncAggregatorSelectionData, }; pub type Result = std::result::Result; diff --git a/consensus/state_processing/src/per_block_processing/tests.rs b/consensus/state_processing/src/per_block_processing/tests.rs index 183063ac762..739717b33ff 100644 --- 
a/consensus/state_processing/src/per_block_processing/tests.rs +++ b/consensus/state_processing/src/per_block_processing/tests.rs @@ -11,7 +11,10 @@ use crate::{ per_block_processing::{process_operations, verify_exit::verify_exit}, }; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::{AggregateSignature, Keypair, PublicKeyBytes, Signature, SignatureBytes}; +use fixed_bytes::FixedBytesExtended; use ssz_types::Bitfield; +use ssz_types::VariableList; use std::sync::{Arc, LazyLock}; use test_utils::generate_deterministic_keypairs; use types::*; @@ -213,7 +216,7 @@ async fn valid_4_deposits() { let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 4, None, None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -237,7 +240,7 @@ async fn invalid_deposit_deposit_count_too_big() { let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -269,7 +272,7 @@ async fn invalid_deposit_count_too_small() { let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -301,7 +304,7 @@ async fn invalid_deposit_bad_merkle_proof() { let mut state = harness.get_current_state(); let (deposits, state) = harness.make_deposits(&mut state, 1, None, None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -336,7 +339,7 @@ async fn invalid_deposit_wrong_sig() { let (deposits, state) = harness.make_deposits(&mut state, 
1, None, Some(SignatureBytes::empty())); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -360,7 +363,7 @@ async fn invalid_deposit_invalid_pub_key() { let (deposits, state) = harness.make_deposits(&mut state, 1, Some(PublicKeyBytes::empty()), None); - let deposits = VariableList::from(deposits); + let deposits = VariableList::try_from(deposits).unwrap(); let mut head_block = harness .chain @@ -753,10 +756,12 @@ async fn invalid_attester_slashing_1_invalid() { let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { AttesterSlashing::Base(attester_slashing) => { - attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); + attester_slashing.attestation_1.attesting_indices = + VariableList::try_from(vec![2, 1]).unwrap(); } AttesterSlashing::Electra(attester_slashing) => { - attester_slashing.attestation_1.attesting_indices = VariableList::from(vec![2, 1]); + attester_slashing.attestation_1.attesting_indices = + VariableList::try_from(vec![2, 1]).unwrap(); } } @@ -791,10 +796,12 @@ async fn invalid_attester_slashing_2_invalid() { let mut attester_slashing = harness.make_attester_slashing(vec![1, 2]); match &mut attester_slashing { AttesterSlashing::Base(attester_slashing) => { - attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); + attester_slashing.attestation_2.attesting_indices = + VariableList::try_from(vec![2, 1]).unwrap(); } AttesterSlashing::Electra(attester_slashing) => { - attester_slashing.attestation_2.attesting_indices = VariableList::from(vec![2, 1]); + attester_slashing.attestation_2.attesting_indices = + VariableList::try_from(vec![2, 1]).unwrap(); } } diff --git a/consensus/state_processing/src/per_block_processing/verify_deposit.rs b/consensus/state_processing/src/per_block_processing/verify_deposit.rs index c996e580a78..d403bfa82b6 100644 --- 
a/consensus/state_processing/src/per_block_processing/verify_deposit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_deposit.rs @@ -1,5 +1,6 @@ use super::errors::{BlockOperationError, DepositInvalid}; use crate::per_block_processing::signature_sets::deposit_pubkey_signature_message; +use bls::PublicKeyBytes; use merkle_proof::verify_merkle_proof; use safe_arith::SafeArith; use tree_hash::TreeHash; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs index 5c08406eaef..5e177c5d2b7 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs @@ -1,5 +1,5 @@ use crate::EpochProcessingError; -use types::List; +use milhouse::List; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; use types::participation_flags::ParticipationFlags; diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index fd712cc8e50..a818e087755 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -1,9 +1,10 @@ use super::base::{TotalBalances, ValidatorStatus, validator_statuses::InclusionInfo}; use crate::metrics; +use milhouse::List; use std::sync::Arc; use types::{ - BeaconStateError, Epoch, EthSpec, List, ParticipationFlags, ProgressiveBalancesCache, - SyncCommittee, Validator, + BeaconStateError, Epoch, EthSpec, ParticipationFlags, ProgressiveBalancesCache, SyncCommittee, + Validator, consts::altair::{TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX}, }; diff --git 
a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index a5a2a69ebff..4818dcbf670 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,4 +1,5 @@ -use types::{BeaconStateError, EpochCacheError, InconsistentFork, milhouse}; +use milhouse; +use types::{BeaconStateError, EpochCacheError, InconsistentFork}; #[derive(Debug, PartialEq)] pub enum EpochProcessingError { diff --git a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs index 8fcdda062c9..9172d954bc8 100644 --- a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs @@ -1,7 +1,7 @@ use super::errors::EpochProcessingError; use safe_arith::SafeArith; use tree_hash::TreeHash; -use types::Unsigned; +use typenum::Unsigned; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; diff --git a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs index 66d68804e1d..8d712fd19b8 100644 --- a/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs +++ b/consensus/state_processing/src/per_epoch_processing/justification_and_finalization_state.rs @@ -1,4 +1,5 @@ -use types::{BeaconState, BeaconStateError, BitVector, Checkpoint, Epoch, EthSpec, Hash256}; +use ssz_types::BitVector; +use types::{BeaconState, BeaconStateError, Checkpoint, Epoch, EthSpec, Hash256}; /// This is a subset of the `BeaconState` which is used to compute justification and finality /// without modifying the `BeaconState`. 
diff --git a/consensus/state_processing/src/per_epoch_processing/resets.rs b/consensus/state_processing/src/per_epoch_processing/resets.rs index c9f69c3c95e..e05fb30c334 100644 --- a/consensus/state_processing/src/per_epoch_processing/resets.rs +++ b/consensus/state_processing/src/per_epoch_processing/resets.rs @@ -1,8 +1,9 @@ use super::errors::EpochProcessingError; +use milhouse::List; use safe_arith::SafeArith; +use typenum::Unsigned; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; -use types::{List, Unsigned}; pub fn process_eth1_data_reset( state: &mut BeaconState, diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 1584e932bdf..914e025f2fe 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -8,19 +8,20 @@ use crate::{ per_epoch_processing::{Delta, Error, ParticipationEpochSummary}, }; use itertools::izip; +use milhouse::{Cow, List, Vector}; use safe_arith::{SafeArith, SafeArithIter}; use std::cmp::{max, min}; use std::collections::{BTreeSet, HashMap}; use tracing::instrument; +use typenum::Unsigned; use types::{ ActivationQueue, BeaconState, BeaconStateError, ChainSpec, Checkpoint, DepositData, Epoch, - EthSpec, ExitCache, ForkName, List, ParticipationFlags, PendingDeposit, - ProgressiveBalancesCache, RelativeEpoch, Unsigned, Validator, Vector, + EthSpec, ExitCache, ForkName, ParticipationFlags, PendingDeposit, ProgressiveBalancesCache, + RelativeEpoch, Validator, consts::altair::{ NUM_FLAG_INDICES, PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }, - milhouse::Cow, }; pub struct SinglePassConfig { diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs index 47eb06e907a..6008276d150 100644 --- 
a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -4,7 +4,8 @@ use crate::per_epoch_processing::{ single_pass::{SinglePassConfig, process_epoch_single_pass}, }; use safe_arith::{SafeArith, SafeArithIter}; -use types::{BeaconState, ChainSpec, EthSpec, Unsigned}; +use typenum::Unsigned; +use types::{BeaconState, ChainSpec, EthSpec}; /// Process slashings. pub fn process_slashings( diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 8695054e1e7..0f8e5dc52d8 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -3,6 +3,7 @@ use crate::upgrade::{ upgrade_to_electra, upgrade_to_fulu, upgrade_to_gloas, }; use crate::{per_epoch_processing::EpochProcessingSummary, *}; +use fixed_bytes::FixedBytesExtended; use safe_arith::{ArithError, SafeArith}; use tracing::instrument; use types::*; diff --git a/consensus/state_processing/src/state_advance.rs b/consensus/state_processing/src/state_advance.rs index 4d38e7797e6..19b21dad19a 100644 --- a/consensus/state_processing/src/state_advance.rs +++ b/consensus/state_processing/src/state_advance.rs @@ -5,7 +5,8 @@ //! duplication and protect against some easy-to-make mistakes when performing state advances. 
use crate::*; -use types::{BeaconState, ChainSpec, EthSpec, FixedBytesExtended, Hash256, Slot}; +use fixed_bytes::FixedBytesExtended; +use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot}; #[derive(Debug, PartialEq)] pub enum Error { diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 3006da25ae7..022175ff999 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -2,11 +2,12 @@ use crate::common::update_progressive_balances_cache::initialize_progressive_bal use crate::common::{ attesting_indices_base::get_attesting_indices, get_attestation_participation_flag_indices, }; +use milhouse::List; use std::mem; use std::sync::Arc; use types::{ BeaconState, BeaconStateAltair, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, List, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, + Fork, ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, }; /// Translate the participation information from the epoch prior to the fork into Altair's format. diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index ae0dbde7678..948fa511b73 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,7 +1,8 @@ +use milhouse::List; use std::mem; use types::{ BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EpochCache, EthSpec, - Fork, List, + Fork, }; /// Transform a `Bellatrix` state into an `Capella` state. 
diff --git a/consensus/state_processing/src/upgrade/fulu.rs b/consensus/state_processing/src/upgrade/fulu.rs index c2aced7047a..c14c1edbec3 100644 --- a/consensus/state_processing/src/upgrade/fulu.rs +++ b/consensus/state_processing/src/upgrade/fulu.rs @@ -1,8 +1,7 @@ +use milhouse::Vector; use safe_arith::SafeArith; use std::mem; -use types::{ - BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork, Vector, -}; +use types::{BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork}; /// Transform a `Electra` state into an `Fulu` state. pub fn upgrade_to_fulu( diff --git a/consensus/state_processing/src/upgrade/gloas.rs b/consensus/state_processing/src/upgrade/gloas.rs index 8bb6991bfbe..d6c353cc2a9 100644 --- a/consensus/state_processing/src/upgrade/gloas.rs +++ b/consensus/state_processing/src/upgrade/gloas.rs @@ -1,5 +1,11 @@ +use bls::Hash256; +use milhouse::{List, Vector}; +use ssz_types::BitVector; use std::mem; -use types::{BeaconState, BeaconStateError as Error, BeaconStateGloas, ChainSpec, EthSpec, Fork}; +use types::{ + BeaconState, BeaconStateError as Error, BeaconStateGloas, BuilderPendingPayment, ChainSpec, + EthSpec, ExecutionPayloadBid, Fork, +}; /// Transform a `Fulu` state into a `Gloas` state. 
pub fn upgrade_to_gloas( @@ -63,8 +69,8 @@ pub fn upgrade_state_to_gloas( // Sync committees current_sync_committee: pre.current_sync_committee.clone(), next_sync_committee: pre.next_sync_committee.clone(), - // Execution - latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_gloas(), + // Execution Bid + latest_execution_payload_bid: ExecutionPayloadBid::default(), // Capella next_withdrawal_index: pre.next_withdrawal_index, next_withdrawal_validator_index: pre.next_withdrawal_validator_index, @@ -79,6 +85,15 @@ pub fn upgrade_state_to_gloas( pending_deposits: pre.pending_deposits.clone(), pending_partial_withdrawals: pre.pending_partial_withdrawals.clone(), pending_consolidations: pre.pending_consolidations.clone(), + // Gloas + execution_payload_availability: BitVector::default(), // All bits set to false initially + builder_pending_payments: Vector::new(vec![ + BuilderPendingPayment::default(); + E::builder_pending_payments_limit() + ])?, + builder_pending_withdrawals: List::default(), // Empty list initially, + latest_block_hash: pre.latest_execution_payload_header.block_hash, + latest_withdrawals_root: Hash256::default(), // Caches total_active_balance: pre.total_active_balance, progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), diff --git a/consensus/state_processing/src/verify_operation.rs b/consensus/state_processing/src/verify_operation.rs index a922e47cfef..1f76f19586f 100644 --- a/consensus/state_processing/src/verify_operation.rs +++ b/consensus/state_processing/src/verify_operation.rs @@ -8,7 +8,7 @@ use crate::per_block_processing::{ verify_proposer_slashing, }; use arbitrary::Arbitrary; -use derivative::Derivative; +use educe::Educe; use smallvec::{SmallVec, smallvec}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; @@ -39,11 +39,11 @@ pub trait TransformPersist { /// /// The inner `op` field is private, meaning instances of this type can only be constructed /// by calling `validate`. 
-#[derive(Derivative, Debug, Clone, Arbitrary)] -#[derivative( +#[derive(Educe, Debug, Clone, Arbitrary)] +#[educe( PartialEq, Eq, - Hash(bound = "T: TransformPersist + std::hash::Hash, E: EthSpec") + Hash(bound(T: TransformPersist + std::hash::Hash, E: EthSpec)) )] #[arbitrary(bound = "T: TransformPersist + Arbitrary<'arbitrary>, E: EthSpec")] pub struct SigVerifiedOp { diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index bfce4b72d22..78c6f871cb4 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -25,13 +25,12 @@ portable = ["bls/supranational-portable"] [dependencies] alloy-primitives = { workspace = true } -alloy-rlp = { version = "0.3.4", features = ["derive"] } +alloy-rlp = { workspace = true, features = ["derive"] } arbitrary = { workspace = true, features = ["derive"], optional = true } bls = { workspace = true } compare_fields = { workspace = true } -compare_fields_derive = { workspace = true } context_deserialize = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } @@ -66,6 +65,7 @@ test_random_derive = { path = "../../common/test_random_derive" } tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } @@ -74,6 +74,9 @@ paste = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } +[lints.clippy] +module_inception = "allow" + [[bench]] name = "benches" harness = false diff --git a/consensus/types/benches/benches.rs b/consensus/types/benches/benches.rs index 814001d9660..397c33163e9 100644 --- a/consensus/types/benches/benches.rs +++ b/consensus/types/benches/benches.rs @@ -1,10 +1,11 @@ use criterion::{BatchSize, BenchmarkId, Criterion, black_box, 
criterion_group, criterion_main}; +use fixed_bytes::FixedBytesExtended; use milhouse::List; use rayon::prelude::*; use ssz::Encode; use std::sync::Arc; use types::{ - BeaconState, Epoch, Eth1Data, EthSpec, FixedBytesExtended, Hash256, MainnetEthSpec, Validator, + BeaconState, Epoch, Eth1Data, EthSpec, Hash256, MainnetEthSpec, Validator, test_utils::generate_deterministic_keypair, }; diff --git a/consensus/types/src/aggregate_and_proof.rs b/consensus/types/src/attestation/aggregate_and_proof.rs similarity index 93% rename from consensus/types/src/aggregate_and_proof.rs rename to consensus/types/src/attestation/aggregate_and_proof.rs index e76ba48bf47..4c6e775e56d 100644 --- a/consensus/types/src/aggregate_and_proof.rs +++ b/consensus/types/src/attestation/aggregate_and_proof.rs @@ -1,17 +1,20 @@ -use super::{AttestationBase, AttestationElectra, AttestationRef}; -use super::{ - ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, SecretKey, SelectionProof, - Signature, SignedRoot, -}; -use crate::Attestation; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{PublicKey, SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationRef, SelectionProof, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + #[superstruct( variants(Base, Electra), variant_attributes( diff --git a/consensus/types/src/attestation.rs b/consensus/types/src/attestation/attestation.rs similarity index 94% rename from consensus/types/src/attestation.rs rename to consensus/types/src/attestation/attestation.rs index 860f0d0a2d3..693b5889f53 100644 --- a/consensus/types/src/attestation.rs +++ 
b/consensus/types/src/attestation/attestation.rs @@ -1,23 +1,28 @@ -use super::{ - AggregateSignature, AttestationData, BitList, ChainSpec, Domain, EthSpec, Fork, SecretKey, - Signature, SignedRoot, +use std::{ + collections::HashSet, + hash::{Hash, Hasher}, }; -use crate::slot_data::SlotData; -use crate::{ - Checkpoint, ContextDeserialize, ForkName, IndexedAttestationBase, IndexedAttestationElectra, -}; -use crate::{Hash256, Slot, test_utils::TestRandom}; -use crate::{IndexedAttestation, context_deserialize}; -use derivative::Derivative; + +use bls::{AggregateSignature, SecretKey, Signature}; +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::BitVector; -use std::collections::HashSet; -use std::hash::{Hash, Hasher}; +use ssz_types::{BitList, BitVector}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + AttestationData, Checkpoint, IndexedAttestation, IndexedAttestationBase, + IndexedAttestationElectra, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -45,11 +50,11 @@ impl From for Error { Decode, Encode, TestRandom, - Derivative, + Educe, TreeHash, ), context_deserialize(ForkName), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -66,7 +71,8 @@ impl From for Error { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, TreeHash, Encode, Derivative, Deserialize, PartialEq)] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, Educe, Deserialize)] +#[educe(PartialEq)] #[serde(untagged)] 
#[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] @@ -599,18 +605,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for Vec> */ #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive( - Debug, - Clone, - Serialize, - Deserialize, - Decode, - Encode, - TestRandom, - Derivative, - TreeHash, - PartialEq, -)] +#[derive(Debug, Clone, Serialize, Deserialize, Decode, Encode, TestRandom, TreeHash, PartialEq)] #[context_deserialize(ForkName)] pub struct SingleAttestation { #[serde(with = "serde_utils::quoted_u64")] @@ -622,19 +617,22 @@ pub struct SingleAttestation { } impl SingleAttestation { - pub fn to_indexed(&self, fork_name: ForkName) -> IndexedAttestation { + pub fn to_indexed( + &self, + fork_name: ForkName, + ) -> Result, ssz_types::Error> { if fork_name.electra_enabled() { - IndexedAttestation::Electra(IndexedAttestationElectra { - attesting_indices: vec![self.attester_index].into(), + Ok(IndexedAttestation::Electra(IndexedAttestationElectra { + attesting_indices: vec![self.attester_index].try_into()?, data: self.data.clone(), signature: self.signature.clone(), - }) + })) } else { - IndexedAttestation::Base(IndexedAttestationBase { - attesting_indices: vec![self.attester_index].into(), + Ok(IndexedAttestation::Base(IndexedAttestationBase { + attesting_indices: vec![self.attester_index].try_into()?, data: self.data.clone(), signature: self.signature.clone(), - }) + })) } } } diff --git a/consensus/types/src/attestation_data.rs b/consensus/types/src/attestation/attestation_data.rs similarity index 87% rename from consensus/types/src/attestation_data.rs rename to consensus/types/src/attestation/attestation_data.rs index a4643e54741..f3fceb9b70f 100644 --- a/consensus/types/src/attestation_data.rs +++ b/consensus/types/src/attestation/attestation_data.rs @@ -1,11 +1,16 @@ -use crate::slot_data::SlotData; -use crate::test_utils::TestRandom; -use crate::{Checkpoint, ForkName, Hash256, SignedRoot, Slot}; 
use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; + +use crate::{ + attestation::Checkpoint, + core::{Hash256, SignedRoot, Slot, SlotData}, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data upon which an attestation is based. /// /// Spec v0.12.1 diff --git a/consensus/types/src/attestation_duty.rs b/consensus/types/src/attestation/attestation_duty.rs similarity index 92% rename from consensus/types/src/attestation_duty.rs rename to consensus/types/src/attestation/attestation_duty.rs index 70c7c5c170f..fe3da79a2b1 100644 --- a/consensus/types/src/attestation_duty.rs +++ b/consensus/types/src/attestation/attestation_duty.rs @@ -1,6 +1,7 @@ -use crate::*; use serde::{Deserialize, Serialize}; +use crate::{attestation::CommitteeIndex, core::Slot}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Copy, Default, Serialize, Deserialize)] pub struct AttestationDuty { diff --git a/consensus/types/src/beacon_committee.rs b/consensus/types/src/attestation/beacon_committee.rs similarity index 92% rename from consensus/types/src/beacon_committee.rs rename to consensus/types/src/attestation/beacon_committee.rs index 04fe763a11b..2dba30bad3c 100644 --- a/consensus/types/src/beacon_committee.rs +++ b/consensus/types/src/attestation/beacon_committee.rs @@ -1,4 +1,4 @@ -use crate::*; +use crate::{attestation::CommitteeIndex, core::Slot}; #[derive(Default, Clone, Debug, PartialEq)] pub struct BeaconCommittee<'a> { diff --git a/consensus/types/src/checkpoint.rs b/consensus/types/src/attestation/checkpoint.rs similarity index 88% rename from consensus/types/src/checkpoint.rs rename to consensus/types/src/attestation/checkpoint.rs index 545af59985e..f5a95f0ad94 100644 --- a/consensus/types/src/checkpoint.rs +++ b/consensus/types/src/attestation/checkpoint.rs @@ -1,11 +1,15 @@ -use 
crate::test_utils::TestRandom; -use crate::{Epoch, ForkName, Hash256}; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Epoch, Hash256}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Casper FFG checkpoint, used in attestations. /// /// Spec v0.12.1 diff --git a/consensus/types/src/indexed_attestation.rs b/consensus/types/src/attestation/indexed_attestation.rs similarity index 94% rename from consensus/types/src/indexed_attestation.rs rename to consensus/types/src/attestation/indexed_attestation.rs index 4ba695b9d51..272b015d907 100644 --- a/consensus/types/src/indexed_attestation.rs +++ b/consensus/types/src/attestation/indexed_attestation.rs @@ -1,17 +1,21 @@ -use crate::context_deserialize; -use crate::{ - AggregateSignature, AttestationData, EthSpec, ForkName, VariableList, test_utils::TestRandom, +use std::{ + hash::{Hash, Hasher}, + slice::Iter, }; -use core::slice::Iter; -use derivative::Derivative; + +use bls::AggregateSignature; +use context_deserialize::context_deserialize; +use educe::Educe; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::hash::{Hash, Hasher}; +use ssz_types::VariableList; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{attestation::AttestationData, core::EthSpec, fork::ForkName, test_utils::TestRandom}; + /// Details an attestation that can be slashable. /// /// To be included in an `AttesterSlashing`. 
@@ -28,11 +32,11 @@ use tree_hash_derive::TreeHash; Decode, Encode, TestRandom, - Derivative, + Educe, TreeHash, ), context_deserialize(ForkName), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -46,7 +50,8 @@ use tree_hash_derive::TreeHash; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, TreeHash, Encode, Derivative, Deserialize, PartialEq)] +#[derive(Debug, Clone, Serialize, TreeHash, Encode, Educe, Deserialize)] +#[educe(PartialEq)] #[serde(untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] @@ -207,9 +212,10 @@ impl Hash for IndexedAttestation { #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; - use crate::slot_epoch::Epoch; - use crate::test_utils::{SeedableRng, XorShiftRng}; + use crate::{ + core::{Epoch, MainnetEthSpec}, + test_utils::{SeedableRng, XorShiftRng}, + }; #[test] pub fn test_is_double_vote_true() { diff --git a/consensus/types/src/attestation/indexed_payload_attestation.rs b/consensus/types/src/attestation/indexed_payload_attestation.rs new file mode 100644 index 00000000000..4de805570cf --- /dev/null +++ b/consensus/types/src/attestation/indexed_payload_attestation.rs @@ -0,0 +1,36 @@ +use crate::test_utils::TestRandom; +use crate::{EthSpec, ForkName, PayloadAttestationData}; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; +use core::slice::Iter; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive(TestRandom, TreeHash, Debug, Clone, PartialEq, Encode, Decode, Serialize, Deserialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", 
arbitrary(bound = "E: EthSpec"))] +#[context_deserialize(ForkName)] +pub struct IndexedPayloadAttestation { + #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] + pub attesting_indices: VariableList, + pub data: PayloadAttestationData, + pub signature: AggregateSignature, +} + +impl IndexedPayloadAttestation { + pub fn attesting_indices_iter(&self) -> Iter<'_, u64> { + self.attesting_indices.iter() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_and_tree_hash_tests!(IndexedPayloadAttestation); +} diff --git a/consensus/types/src/attestation/mod.rs b/consensus/types/src/attestation/mod.rs new file mode 100644 index 00000000000..586d99bd900 --- /dev/null +++ b/consensus/types/src/attestation/mod.rs @@ -0,0 +1,47 @@ +mod aggregate_and_proof; +mod attestation; +mod attestation_data; +mod attestation_duty; +mod beacon_committee; +mod checkpoint; +mod indexed_attestation; +mod indexed_payload_attestation; +mod participation_flags; +mod payload_attestation; +mod payload_attestation_data; +mod payload_attestation_message; +mod pending_attestation; +mod selection_proof; +mod shuffling_id; +mod signed_aggregate_and_proof; +mod subnet_id; + +pub use aggregate_and_proof::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, +}; +pub use attestation::{ + Attestation, AttestationBase, AttestationElectra, AttestationOnDisk, AttestationRef, + AttestationRefMut, AttestationRefOnDisk, Error as AttestationError, SingleAttestation, +}; +pub use attestation_data::AttestationData; +pub use attestation_duty::AttestationDuty; +pub use beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; +pub use checkpoint::Checkpoint; +pub use indexed_attestation::{ + IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, +}; +pub use indexed_payload_attestation::IndexedPayloadAttestation; +pub use participation_flags::ParticipationFlags; +pub use 
payload_attestation::PayloadAttestation; +pub use payload_attestation_data::PayloadAttestationData; +pub use payload_attestation_message::PayloadAttestationMessage; +pub use pending_attestation::PendingAttestation; +pub use selection_proof::SelectionProof; +pub use shuffling_id::AttestationShufflingId; +pub use signed_aggregate_and_proof::{ + SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, + SignedAggregateAndProofRefMut, +}; +pub use subnet_id::SubnetId; + +pub type CommitteeIndex = u64; diff --git a/consensus/types/src/participation_flags.rs b/consensus/types/src/attestation/participation_flags.rs similarity index 96% rename from consensus/types/src/participation_flags.rs rename to consensus/types/src/attestation/participation_flags.rs index e59efc51704..66831abfac0 100644 --- a/consensus/types/src/participation_flags.rs +++ b/consensus/types/src/attestation/participation_flags.rs @@ -1,10 +1,14 @@ -use crate::{Hash256, consts::altair::NUM_FLAG_INDICES, test_utils::TestRandom}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; use test_random_derive::TestRandom; use tree_hash::{PackedEncoding, TreeHash, TreeHashType}; +use crate::{ + core::{Hash256, consts::altair::NUM_FLAG_INDICES}, + test_utils::TestRandom, +}; + #[derive(Debug, Default, Clone, Copy, PartialEq, Deserialize, Serialize, TestRandom)] #[serde(transparent)] #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] diff --git a/consensus/types/src/attestation/payload_attestation.rs b/consensus/types/src/attestation/payload_attestation.rs new file mode 100644 index 00000000000..192a4a8fea5 --- /dev/null +++ b/consensus/types/src/attestation/payload_attestation.rs @@ -0,0 +1,31 @@ +use crate::attestation::payload_attestation_data::PayloadAttestationData; +use crate::test_utils::TestRandom; +use crate::{EthSpec, ForkName}; +use bls::AggregateSignature; +use 
context_deserialize::context_deserialize; +use educe::Educe; +use serde::{Deserialize, Serialize}; +use ssz::BitList; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive(TestRandom, TreeHash, Debug, Clone, Encode, Decode, Serialize, Deserialize, Educe)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[serde(bound = "E: EthSpec", deny_unknown_fields)] +#[cfg_attr(feature = "arbitrary", arbitrary(bound = "E: EthSpec"))] +#[educe(PartialEq, Hash)] +#[context_deserialize(ForkName)] +pub struct PayloadAttestation { + pub aggregation_bits: BitList, + pub data: PayloadAttestationData, + pub signature: AggregateSignature, +} + +#[cfg(test)] +mod payload_attestation_tests { + use super::*; + use crate::MinimalEthSpec; + + ssz_and_tree_hash_tests!(PayloadAttestation); +} diff --git a/consensus/types/src/attestation/payload_attestation_data.rs b/consensus/types/src/attestation/payload_attestation_data.rs new file mode 100644 index 00000000000..58d36fd01d5 --- /dev/null +++ b/consensus/types/src/attestation/payload_attestation_data.rs @@ -0,0 +1,28 @@ +use crate::test_utils::TestRandom; +use crate::{ForkName, Hash256, SignedRoot, Slot}; +use context_deserialize::context_deserialize; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + TestRandom, TreeHash, Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize, Deserialize, Hash, +)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[context_deserialize(ForkName)] +pub struct PayloadAttestationData { + pub beacon_block_root: Hash256, + pub slot: Slot, + pub payload_present: bool, + pub blob_data_available: bool, +} + +impl SignedRoot for PayloadAttestationData {} + +#[cfg(test)] +mod payload_attestation_data_tests { + use super::*; + + ssz_and_tree_hash_tests!(PayloadAttestationData); +} diff --git 
a/consensus/types/src/attestation/payload_attestation_message.rs b/consensus/types/src/attestation/payload_attestation_message.rs new file mode 100644 index 00000000000..82e2137b096 --- /dev/null +++ b/consensus/types/src/attestation/payload_attestation_message.rs @@ -0,0 +1,26 @@ +use crate::ForkName; +use crate::attestation::payload_attestation_data::PayloadAttestationData; +use crate::test_utils::TestRandom; +use bls::Signature; +use context_deserialize::context_deserialize; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive(TestRandom, TreeHash, Debug, Clone, PartialEq, Encode, Decode, Serialize, Deserialize)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[context_deserialize(ForkName)] +pub struct PayloadAttestationMessage { + #[serde(with = "serde_utils::quoted_u64")] + pub validator_index: u64, + pub data: PayloadAttestationData, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(PayloadAttestationMessage); +} diff --git a/consensus/types/src/pending_attestation.rs b/consensus/types/src/attestation/pending_attestation.rs similarity index 84% rename from consensus/types/src/pending_attestation.rs rename to consensus/types/src/attestation/pending_attestation.rs index 4a00a0495ac..84353ac1185 100644 --- a/consensus/types/src/pending_attestation.rs +++ b/consensus/types/src/attestation/pending_attestation.rs @@ -1,11 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{AttestationData, BitList, EthSpec, ForkName}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{attestation::AttestationData, core::EthSpec, fork::ForkName, test_utils::TestRandom}; + /// An attestation that has 
been included in the state but not yet fully processed. /// /// Spec v0.12.1 diff --git a/consensus/types/src/selection_proof.rs b/consensus/types/src/attestation/selection_proof.rs similarity index 95% rename from consensus/types/src/selection_proof.rs rename to consensus/types/src/attestation/selection_proof.rs index aa8c0c5658e..b4c48d00780 100644 --- a/consensus/types/src/selection_proof.rs +++ b/consensus/types/src/attestation/selection_proof.rs @@ -1,11 +1,15 @@ -use crate::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, -}; +use std::cmp; + +use bls::{PublicKey, SecretKey, Signature}; use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; -use std::cmp; + +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot}, + fork::Fork, +}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] diff --git a/consensus/types/src/shuffling_id.rs b/consensus/types/src/attestation/shuffling_id.rs similarity index 93% rename from consensus/types/src/shuffling_id.rs rename to consensus/types/src/attestation/shuffling_id.rs index df16f605ed1..25217288f69 100644 --- a/consensus/types/src/shuffling_id.rs +++ b/consensus/types/src/attestation/shuffling_id.rs @@ -1,7 +1,12 @@ -use crate::*; +use std::hash::Hash; + use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::hash::Hash; + +use crate::{ + core::{Epoch, EthSpec, Hash256, RelativeEpoch}, + state::{BeaconState, BeaconStateError}, +}; /// Can be used to key (ID) the shuffling in some chain, in some epoch. 
/// diff --git a/consensus/types/src/signed_aggregate_and_proof.rs b/consensus/types/src/attestation/signed_aggregate_and_proof.rs similarity index 90% rename from consensus/types/src/signed_aggregate_and_proof.rs rename to consensus/types/src/attestation/signed_aggregate_and_proof.rs index 758ac2734b7..48c3f4c567e 100644 --- a/consensus/types/src/signed_aggregate_and_proof.rs +++ b/consensus/types/src/attestation/signed_aggregate_and_proof.rs @@ -1,18 +1,21 @@ -use super::{ - AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, -}; -use super::{ - Attestation, AttestationRef, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, - SelectionProof, Signature, SignedRoot, -}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{ + AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, + Attestation, AttestationRef, SelectionProof, + }, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A Validators signed aggregate proof to publish on the `beacon_aggregate_and_proof` /// gossipsub topic. /// diff --git a/consensus/types/src/subnet_id.rs b/consensus/types/src/attestation/subnet_id.rs similarity index 97% rename from consensus/types/src/subnet_id.rs rename to consensus/types/src/attestation/subnet_id.rs index 6ec8ca4a27f..9585d077b5c 100644 --- a/consensus/types/src/subnet_id.rs +++ b/consensus/types/src/attestation/subnet_id.rs @@ -1,11 +1,17 @@ //! Identifies each shard by an integer identifier. 
-use crate::SingleAttestation; -use crate::{AttestationRef, ChainSpec, CommitteeIndex, EthSpec, Slot}; +use std::{ + ops::{Deref, DerefMut}, + sync::LazyLock, +}; + use alloy_primitives::{U256, bytes::Buf}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; -use std::ops::{Deref, DerefMut}; -use std::sync::LazyLock; + +use crate::{ + attestation::{AttestationRef, CommitteeIndex, SingleAttestation}, + core::{ChainSpec, EthSpec, Slot}, +}; const MAX_SUBNET_ID: usize = 64; diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/block/beacon_block.rs similarity index 94% rename from consensus/types/src/beacon_block.rs rename to consensus/types/src/block/beacon_block.rs index 61c32dd4ac9..bee3cdb2746 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/block/beacon_block.rs @@ -1,18 +1,41 @@ -use crate::attestation::AttestationBase; -use crate::test_utils::TestRandom; -use crate::*; -use derivative::Derivative; +use std::{fmt, marker::PhantomData}; + +use bls::{AggregateSignature, PublicKeyBytes, SecretKey, Signature, SignatureBytes}; +use context_deserialize::ContextDeserialize; +use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError}; use ssz_derive::{Decode, Encode}; -use std::fmt; -use std::marker::PhantomData; +use ssz_types::{BitList, BitVector, FixedVector, VariableList}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; - -use self::indexed_attestation::IndexedAttestationBase; +use typenum::Unsigned; + +use crate::{ + SignedExecutionPayloadBid, + attestation::{AttestationBase, AttestationData, IndexedAttestationBase}, + block::{ + BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, + BeaconBlockBodyGloas, BeaconBlockBodyRef, 
BeaconBlockBodyRefMut, BeaconBlockHeader, + SignedBeaconBlock, SignedBeaconBlockHeader, + }, + core::{ChainSpec, Domain, Epoch, EthSpec, Graffiti, Hash256, SignedRoot, Slot}, + deposit::{Deposit, DepositData}, + execution::{ + AbstractExecPayload, BlindedPayload, Eth1Data, ExecutionPayload, ExecutionRequests, + FullPayload, + }, + exit::{SignedVoluntaryExit, VoluntaryExit}, + fork::{Fork, ForkName, InconsistentFork, map_fork_name}, + slashing::{AttesterSlashingBase, ProposerSlashing}, + state::BeaconStateError, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; /// A block of the `BeaconChain`. #[superstruct( @@ -27,9 +50,9 @@ use self::indexed_attestation::IndexedAttestationBase; Decode, TreeHash, TestRandom, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec, Payload: AbstractExecPayload")), + educe(PartialEq, Hash(bound(E: EthSpec, Payload: AbstractExecPayload))), serde( bound = "E: EthSpec, Payload: AbstractExecPayload", deny_unknown_fields @@ -52,8 +75,8 @@ use self::indexed_attestation::IndexedAttestationBase; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] @@ -283,7 +306,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, E, Payl /// Extracts a reference to an execution payload from a block, returning an error if the block /// is pre-merge. 
- pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { self.body().execution_payload() } } @@ -672,15 +695,41 @@ impl> EmptyBlock for BeaconBlockGloa deposits: VariableList::empty(), voluntary_exits: VariableList::empty(), sync_aggregate: SyncAggregate::empty(), - execution_payload: Payload::Gloas::default(), bls_to_execution_changes: VariableList::empty(), - blob_kzg_commitments: VariableList::empty(), - execution_requests: ExecutionRequests::default(), + signed_execution_payload_bid: SignedExecutionPayloadBid::empty(), + payload_attestations: VariableList::empty(), + _phantom: PhantomData, }, } } } +// TODO(EIP-7732) Mark's branch had the following implementation but not sure if it's needed so will just add header below for reference +// impl> BeaconBlockEIP7732 { + +// TODO(EIP-7732) Look into whether we can remove this in the future since no blinded blocks post-gloas +impl From>> + for BeaconBlockGloas> +{ + fn from(block: BeaconBlockGloas>) -> Self { + let BeaconBlockGloas { + slot, + proposer_index, + parent_root, + state_root, + body, + } = block; + + BeaconBlockGloas { + slot, + proposer_index, + parent_root, + state_root, + body: body.into(), + } + } +} + // We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads. 
impl From>> for BeaconBlockBase> @@ -865,7 +914,10 @@ impl fmt::Display for BlockImportSource { #[cfg(test)] mod tests { use super::*; - use crate::test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}; + use crate::{ + core::MainnetEthSpec, + test_utils::{SeedableRng, XorShiftRng, test_ssz_tree_hash_pair_with}, + }; use ssz::Encode; type BeaconBlock = super::BeaconBlock; diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/block/beacon_block_body.rs similarity index 88% rename from consensus/types/src/beacon_block_body.rs rename to consensus/types/src/block/beacon_block_body.rs index e636fbb5346..1a0b3859002 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/block/beacon_block_body.rs @@ -1,18 +1,43 @@ -use crate::test_utils::TestRandom; -use crate::*; -use derivative::Derivative; +use std::marker::PhantomData; + +use bls::Signature; +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; use merkle_proof::{MerkleTree, MerkleTreeError}; use metastruct::metastruct; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use std::marker::PhantomData; +use ssz_types::{FixedVector, VariableList}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::{BYTES_PER_CHUNK, TreeHash}; use tree_hash_derive::TreeHash; -pub type KzgCommitments = - VariableList::MaxBlobCommitmentsPerBlock>; +use crate::payload_attestation::PayloadAttestation; +use crate::{ + SignedExecutionPayloadBid, + attestation::{AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut}, + core::{EthSpec, Graffiti, Hash256}, + deposit::Deposit, + execution::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, Eth1Data, ExecutionPayload, + ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, 
ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionRequests, + FullPayload, FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, + FullPayloadElectra, FullPayloadFulu, SignedBlsToExecutionChange, + }, + exit::SignedVoluntaryExit, + fork::{ForkName, map_fork_name}, + kzg_ext::KzgCommitments, + light_client::consts::{EXECUTION_PAYLOAD_INDEX, EXECUTION_PAYLOAD_PROOF_LEN}, + slashing::{ + AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingRef, ProposerSlashing, + }, + state::BeaconStateError, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; /// The number of leaves (including padding) on the `BeaconBlockBody` Merkle tree. /// @@ -39,9 +64,9 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; Decode, TreeHash, TestRandom, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec, Payload: AbstractExecPayload")), + educe(PartialEq, Hash(bound(E: EthSpec, Payload: AbstractExecPayload))), serde( bound = "E: EthSpec, Payload: AbstractExecPayload", deny_unknown_fields @@ -63,16 +88,22 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; Fulu(metastruct(mappings(beacon_block_body_fulu_fields(groups(fields))))), Gloas(metastruct(mappings(beacon_block_body_gloas_fields(groups(fields))))), ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") )] -#[derive(Debug, Clone, Serialize, Deserialize, Derivative, TreeHash)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Educe, TreeHash)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(untagged)] 
#[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] @@ -127,17 +158,18 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] #[serde(flatten)] pub execution_payload: Payload::Fulu, - #[superstruct(only(Gloas), partial_getter(rename = "execution_payload_gloas"))] - #[serde(flatten)] - pub execution_payload: Payload::Gloas, #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas))] pub bls_to_execution_changes: VariableList, - #[superstruct(only(Deneb, Electra, Fulu, Gloas))] + #[superstruct(only(Deneb, Electra, Fulu))] pub blob_kzg_commitments: KzgCommitments, - #[superstruct(only(Electra, Fulu, Gloas))] + #[superstruct(only(Electra, Fulu))] pub execution_requests: ExecutionRequests, - #[superstruct(only(Base, Altair))] + #[superstruct(only(Gloas))] + pub signed_execution_payload_bid: SignedExecutionPayloadBid, + #[superstruct(only(Gloas))] + pub payload_attestations: VariableList, E::MaxPayloadAttestations>, + #[superstruct(only(Base, Altair, Gloas))] #[metastruct(exclude_from(fields))] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] @@ -147,7 +179,7 @@ pub struct BeaconBlockBody = FullPay } impl> BeaconBlockBody { - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { self.to_ref().execution_payload() } @@ -158,15 +190,15 @@ impl> BeaconBlockBody { } impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Payload> { - pub fn execution_payload(&self) -> Result, Error> { + pub fn execution_payload(&self) -> Result, BeaconStateError> { match self { - Self::Base(_) | Self::Altair(_) => Err(Error::IncorrectStateVariant), + Self::Base(_) | Self::Altair(_) => Err(BeaconStateError::IncorrectStateVariant), Self::Bellatrix(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), 
Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Electra(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Fulu(body) => Ok(Payload::Ref::from(&body.execution_payload)), - Self::Gloas(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), } } @@ -216,7 +248,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, pub fn kzg_commitment_merkle_proof( &self, index: usize, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let kzg_commitments_proof = self.kzg_commitments_merkle_proof()?; let proof = self.complete_kzg_commitment_merkle_proof(index, &kzg_commitments_proof)?; Ok(proof) @@ -224,16 +256,19 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, /// Produces the proof of inclusion for a `KzgCommitment` in `self.blob_kzg_commitments` /// at `index` using an existing proof for the `blob_kzg_commitments` field. + /// TODO(EIP-7732) Investigate call sites, since this will no longer work for Gloas: there is no blob_kzg_commitments field in the body anymore pub fn complete_kzg_commitment_merkle_proof( &self, index: usize, kzg_commitments_proof: &[Hash256], - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) => { - Err(Error::IncorrectStateVariant) - } - Self::Deneb(_) | Self::Electra(_) | Self::Fulu(_) | Self::Gloas(_) => { + Self::Base(_) + | Self::Altair(_) + | Self::Bellatrix(_) + | Self::Capella(_) + | Self::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), + Self::Deneb(_) | Self::Electra(_) | Self::Fulu(_) => { // We compute the branches by generating 2 merkle trees: // 1. Merkle tree for the `blob_kzg_commitments` List object // 2. 
Merkle tree for the `BeaconBlockBody` container @@ -253,7 +288,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let tree = MerkleTree::create(&blob_leaves, depth as usize); let (_, mut proof) = tree .generate_proof(index, depth as usize) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; // Add the branch corresponding to the length mix-in. let length = blob_leaves.len(); @@ -261,7 +296,9 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, let mut length_bytes = [0; BYTES_PER_CHUNK]; length_bytes .get_mut(0..usize_len) - .ok_or(Error::MerkleTreeError(MerkleTreeError::PleaseNotifyTheDevs))? + .ok_or(BeaconStateError::MerkleTreeError( + MerkleTreeError::PleaseNotifyTheDevs, + ))? .copy_from_slice(&length.to_le_bytes()); let length_root = Hash256::from_slice(length_bytes.as_slice()); proof.push(length_root); @@ -279,32 +316,41 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, /// Produces the proof of inclusion for `self.blob_kzg_commitments`. pub fn kzg_commitments_merkle_proof( &self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let body_leaves = self.body_merkle_leaves(); let beacon_block_body_depth = body_leaves.len().next_power_of_two().ilog2() as usize; let tree = MerkleTree::create(&body_leaves, beacon_block_body_depth); let (_, proof) = tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; Ok(FixedVector::new(proof)?) 
} - pub fn block_body_merkle_proof(&self, generalized_index: usize) -> Result, Error> { + pub fn block_body_merkle_proof( + &self, + generalized_index: usize, + ) -> Result, BeaconStateError> { let field_index = match generalized_index { - light_client_update::EXECUTION_PAYLOAD_INDEX => { + EXECUTION_PAYLOAD_INDEX => { // Execution payload is a top-level field, subtract off the generalized indices // for the internal nodes. Result should be 9, the field offset of the execution // payload in the `BeaconBlockBody`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#beaconblockbody generalized_index .checked_sub(NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES) - .ok_or(Error::GeneralizedIndexNotSupported(generalized_index))? + .ok_or(BeaconStateError::GeneralizedIndexNotSupported( + generalized_index, + ))? + } + _ => { + return Err(BeaconStateError::GeneralizedIndexNotSupported( + generalized_index, + )); } - _ => return Err(Error::GeneralizedIndexNotSupported(generalized_index)), }; let leaves = self.body_merkle_leaves(); - let depth = light_client_update::EXECUTION_PAYLOAD_PROOF_LEN; + let depth = EXECUTION_PAYLOAD_PROOF_LEN; let tree = merkle_proof::MerkleTree::create(&leaves, depth); let (_, proof) = tree.generate_proof(field_index, depth)?; @@ -500,6 +546,46 @@ impl From>> } } +// Post-Fulu block bodies without payloads can be converted into block bodies with payloads +// TODO(EIP-7732) Look into whether we can remove this in the future since no blinded blocks post-gloas +impl From>> + for BeaconBlockBodyGloas> +{ + fn from(body: BeaconBlockBodyGloas>) -> Self { + let BeaconBlockBodyGloas { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + bls_to_execution_changes, + signed_execution_payload_bid, + payload_attestations, + _phantom, + } = body; + + BeaconBlockBodyGloas { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, 
+ attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + bls_to_execution_changes, + signed_execution_payload_bid, + payload_attestations, + _phantom: PhantomData, + } + } +} + // Likewise bodies with payloads can be transformed into bodies without. impl From>> for ( @@ -810,10 +896,10 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: FullPayloadGloas { execution_payload }, bls_to_execution_changes, - blob_kzg_commitments, - execution_requests, + signed_execution_payload_bid, + payload_attestations, + _phantom, } = body; ( @@ -827,14 +913,12 @@ impl From>> deposits, voluntary_exits, sync_aggregate, - execution_payload: BlindedPayloadGloas { - execution_payload_header: From::from(&execution_payload), - }, bls_to_execution_changes, - blob_kzg_commitments: blob_kzg_commitments.clone(), - execution_requests, + signed_execution_payload_bid, + payload_attestations, + _phantom: PhantomData, }, - Some(execution_payload), + None, ) } } @@ -1034,39 +1118,8 @@ impl BeaconBlockBodyFulu> { impl BeaconBlockBodyGloas> { pub fn clone_as_blinded(&self) -> BeaconBlockBodyGloas> { - let BeaconBlockBodyGloas { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings, - attester_slashings, - attestations, - deposits, - voluntary_exits, - sync_aggregate, - execution_payload: FullPayloadGloas { execution_payload }, - bls_to_execution_changes, - blob_kzg_commitments, - execution_requests, - } = self; - - BeaconBlockBodyGloas { - randao_reveal: randao_reveal.clone(), - eth1_data: eth1_data.clone(), - graffiti: *graffiti, - proposer_slashings: proposer_slashings.clone(), - attester_slashings: attester_slashings.clone(), - attestations: attestations.clone(), - deposits: deposits.clone(), - voluntary_exits: voluntary_exits.clone(), - sync_aggregate: sync_aggregate.clone(), - execution_payload: BlindedPayloadGloas { - execution_payload_header: execution_payload.into(), - }, - bls_to_execution_changes: 
bls_to_execution_changes.clone(), - blob_kzg_commitments: blob_kzg_commitments.clone(), - execution_requests: execution_requests.clone(), - } + let (block_body, _payload) = self.clone().into(); + block_body } } @@ -1100,22 +1153,16 @@ impl<'de, E: EthSpec, Payload: AbstractExecPayload> ContextDeserialize<'de, F } } -/// Util method helpful for logging. -pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { - let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); - let commitments_joined = commitment_strings.join(", "); - let surrounded_commitments = format!("[{}]", commitments_joined); - surrounded_commitments -} - #[cfg(test)] mod tests { mod base { use super::super::*; + use crate::core::MainnetEthSpec; ssz_and_tree_hash_tests!(BeaconBlockBodyBase); } mod altair { use super::super::*; + use crate::core::MainnetEthSpec; ssz_and_tree_hash_tests!(BeaconBlockBodyAltair); } } diff --git a/consensus/types/src/beacon_block_header.rs b/consensus/types/src/block/beacon_block_header.rs similarity index 90% rename from consensus/types/src/beacon_block_header.rs rename to consensus/types/src/block/beacon_block_header.rs index e14a9fc8af7..06e1023d911 100644 --- a/consensus/types/src/beacon_block_header.rs +++ b/consensus/types/src/block/beacon_block_header.rs @@ -1,6 +1,4 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use bls::SecretKey; use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; @@ -8,6 +6,13 @@ use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBeaconBlockHeader, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A header of a `BeaconBlock`. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/block/mod.rs b/consensus/types/src/block/mod.rs new file mode 100644 index 00000000000..81c8ffbd639 --- /dev/null +++ b/consensus/types/src/block/mod.rs @@ -0,0 +1,26 @@ +mod beacon_block; +mod beacon_block_body; +mod beacon_block_header; +mod signed_beacon_block; +mod signed_beacon_block_header; + +pub use beacon_block::{ + BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockRef, + BeaconBlockRefMut, BlindedBeaconBlock, BlockImportSource, EmptyBlock, +}; +pub use beacon_block_body::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, + BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, + BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, + NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES, +}; +pub use beacon_block_header::BeaconBlockHeader; + +pub use signed_beacon_block::{ + SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, + SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBeaconBlockHash, SignedBlindedBeaconBlock, + ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, +}; +pub use signed_beacon_block_header::SignedBeaconBlockHeader; diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/block/signed_beacon_block.rs similarity index 93% rename from consensus/types/src/signed_beacon_block.rs rename to consensus/types/src/block/signed_beacon_block.rs index 979b91e30d5..aeb3c18d957 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/block/signed_beacon_block.rs @@ -1,16 +1,40 @@ -use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, format_kzg_commitments}; -use 
crate::test_utils::TestRandom; -use crate::*; -use derivative::Derivative; +use std::fmt; + +use bls::{PublicKey, Signature}; +use context_deserialize::ContextDeserialize; +use educe::Educe; use merkle_proof::MerkleTree; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; -use std::fmt; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; +use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, + BeaconBlockBellatrix, BeaconBlockBodyBellatrix, BeaconBlockBodyCapella, + BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, BeaconBlockCapella, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockHeader, + BeaconBlockRef, BeaconBlockRefMut, SignedBeaconBlockHeader, + }, + core::{ChainSpec, Domain, Epoch, EthSpec, Hash256, SignedRoot, SigningData, Slot}, + execution::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, ExecutionPayload, + ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, ExecutionPayloadFulu, FullPayload, FullPayloadBellatrix, + FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, + }, + fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, + kzg_ext::format_kzg_commitments, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct SignedBeaconBlockHash(Hash256); @@ -51,10 +75,10 @@ impl From for Hash256 { Encode, Decode, TreeHash, - Derivative, + Educe, TestRandom ), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec, 
Payload: AbstractExecPayload"), cfg_attr( feature = "arbitrary", @@ -71,8 +95,8 @@ impl From for Hash256 { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec, Payload: AbstractExecPayload") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(untagged)] #[serde(bound = "E: EthSpec, Payload: AbstractExecPayload")] #[tree_hash(enum_behaviour = "transparent")] @@ -253,6 +277,7 @@ impl> SignedBeaconBlock } /// Produce a signed beacon block header corresponding to this block. + #[instrument(level = "debug", skip_all)] pub fn signed_block_header(&self) -> SignedBeaconBlockHeader { SignedBeaconBlockHeader { message: self.message().block_header(), @@ -270,7 +295,7 @@ impl> SignedBeaconBlock SignedBeaconBlockHeader, FixedVector, ), - Error, + BeaconStateError, > { // Create the block body merkle tree let body_leaves = self.message().body().body_merkle_leaves(); @@ -280,7 +305,7 @@ impl> SignedBeaconBlock // Compute the KZG commitments inclusion proof let (_, proof) = body_merkle_tree .generate_proof(BLOB_KZG_COMMITMENTS_INDEX, beacon_block_body_depth) - .map_err(Error::MerkleTreeError)?; + .map_err(BeaconStateError::MerkleTreeError)?; let kzg_commitments_inclusion_proof = FixedVector::new(proof)?; let block_header = BeaconBlockHeader { @@ -648,59 +673,15 @@ impl SignedBeaconBlockFulu> { } } -impl SignedBeaconBlockGloas> { - pub fn into_full_block( - self, - execution_payload: ExecutionPayloadGloas, - ) -> SignedBeaconBlockGloas> { - let SignedBeaconBlockGloas { - message: - BeaconBlockGloas { - slot, - proposer_index, - parent_root, - state_root, - body: - BeaconBlockBodyGloas { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings, - attester_slashings, - attestations, - deposits, - voluntary_exits, - sync_aggregate, - execution_payload: 
BlindedPayloadGloas { .. }, - bls_to_execution_changes, - blob_kzg_commitments, - execution_requests, - }, - }, - signature, - } = self; +// We can convert gloas blocks without payloads into blocks "with" payloads. +// TODO(EIP-7732) Look into whether we can remove this in the future since no blinded blocks post-gloas +impl From>> + for SignedBeaconBlockGloas> +{ + fn from(signed_block: SignedBeaconBlockGloas>) -> Self { + let SignedBeaconBlockGloas { message, signature } = signed_block; SignedBeaconBlockGloas { - message: BeaconBlockGloas { - slot, - proposer_index, - parent_root, - state_root, - body: BeaconBlockBodyGloas { - randao_reveal, - eth1_data, - graffiti, - proposer_slashings, - attester_slashings, - attestations, - deposits, - voluntary_exits, - sync_aggregate, - execution_payload: FullPayloadGloas { execution_payload }, - bls_to_execution_changes, - blob_kzg_commitments, - execution_requests, - }, - }, + message: message.into(), signature, } } @@ -729,9 +710,7 @@ impl SignedBeaconBlock> { (SignedBeaconBlock::Fulu(block), Some(ExecutionPayload::Fulu(payload))) => { SignedBeaconBlock::Fulu(block.into_full_block(payload)) } - (SignedBeaconBlock::Gloas(block), Some(ExecutionPayload::Gloas(payload))) => { - SignedBeaconBlock::Gloas(block.into_full_block(payload)) - } + (SignedBeaconBlock::Gloas(block), _) => SignedBeaconBlock::Gloas(block.into()), // avoid wildcard matching forks so that compiler will // direct us here when a new fork has been added (SignedBeaconBlock::Bellatrix(_), _) => return None, @@ -739,7 +718,7 @@ impl SignedBeaconBlock> { (SignedBeaconBlock::Deneb(_), _) => return None, (SignedBeaconBlock::Electra(_), _) => return None, (SignedBeaconBlock::Fulu(_), _) => return None, - (SignedBeaconBlock::Gloas(_), _) => return None, + // TODO(EIP-7732) Determine if need a match arm for gloas here }; Some(full_block) } @@ -917,6 +896,7 @@ pub mod ssz_tagged_signed_beacon_block_arc { #[cfg(test)] mod test { use super::*; + use 
crate::{block::EmptyBlock, core::MainnetEthSpec}; #[test] fn add_remove_payload_roundtrip() { diff --git a/consensus/types/src/signed_beacon_block_header.rs b/consensus/types/src/block/signed_beacon_block_header.rs similarity index 84% rename from consensus/types/src/signed_beacon_block_header.rs rename to consensus/types/src/block/signed_beacon_block_header.rs index 4a5ff2ec1a4..2fcd8a705f0 100644 --- a/consensus/types/src/signed_beacon_block_header.rs +++ b/consensus/types/src/block/signed_beacon_block_header.rs @@ -1,13 +1,17 @@ -use crate::context_deserialize; -use crate::{ - BeaconBlockHeader, ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, PublicKey, Signature, - SignedRoot, test_utils::TestRandom, -}; +use bls::{PublicKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::BeaconBlockHeader, + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// A signed header of a `BeaconBlock`. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder/builder_bid.rs similarity index 86% rename from consensus/types/src/builder_bid.rs rename to consensus/types/src/builder/builder_bid.rs index 3fb7af35ca1..1018fadb644 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder/builder_bid.rs @@ -1,13 +1,6 @@ -use crate::beacon_block_body::KzgCommitments; -use crate::{ - ChainSpec, ContextDeserialize, EthSpec, ExecutionPayloadHeaderBellatrix, - ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, - ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, - ExecutionPayloadHeaderRefMut, ExecutionRequests, ForkName, ForkVersionDecode, SignedRoot, - Uint256, test_utils::TestRandom, -}; use bls::PublicKeyBytes; use bls::Signature; +use context_deserialize::ContextDeserialize; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; @@ -15,8 +8,20 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, EthSpec, SignedRoot, Uint256}, + execution::{ + ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ExecutionRequests, + }, + fork::{ForkName, ForkVersionDecode}, + kzg_ext::KzgCommitments, + test_utils::TestRandom, +}; + #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( PartialEq, @@ -49,11 +54,9 @@ pub struct BuilderBid { pub header: ExecutionPayloadHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "header_fulu"))] pub header: ExecutionPayloadHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "header_gloas"))] - pub header: 
ExecutionPayloadHeaderGloas, - #[superstruct(only(Deneb, Electra, Fulu, Gloas))] + #[superstruct(only(Deneb, Electra, Fulu))] pub blob_kzg_commitments: KzgCommitments, - #[superstruct(only(Electra, Fulu, Gloas))] + #[superstruct(only(Electra, Fulu))] pub execution_requests: ExecutionRequests, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, @@ -86,7 +89,7 @@ impl ForkVersionDecode for BuilderBid { /// SSZ decode with explicit fork variant. fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result { let builder_bid = match fork_name { - ForkName::Altair | ForkName::Base => { + ForkName::Altair | ForkName::Base | ForkName::Gloas => { return Err(ssz::DecodeError::BytesInvalid(format!( "unsupported fork for ExecutionPayloadHeader: {fork_name}", ))); @@ -98,7 +101,6 @@ impl ForkVersionDecode for BuilderBid { ForkName::Deneb => BuilderBid::Deneb(BuilderBidDeneb::from_ssz_bytes(bytes)?), ForkName::Electra => BuilderBid::Electra(BuilderBidElectra::from_ssz_bytes(bytes)?), ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu::from_ssz_bytes(bytes)?), - ForkName::Gloas => BuilderBid::Gloas(BuilderBidGloas::from_ssz_bytes(bytes)?), }; Ok(builder_bid) } @@ -154,10 +156,7 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for BuilderBid { ForkName::Fulu => { Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) } - ForkName::Gloas => { - Self::Gloas(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
- } - ForkName::Base | ForkName::Altair => { + ForkName::Base | ForkName::Altair | ForkName::Gloas => { return Err(serde::de::Error::custom(format!( "BuilderBid failed to deserialize: unsupported fork '{}'", context diff --git a/consensus/types/src/builder/builder_pending_payment.rs b/consensus/types/src/builder/builder_pending_payment.rs new file mode 100644 index 00000000000..0f1b68ad970 --- /dev/null +++ b/consensus/types/src/builder/builder_pending_payment.rs @@ -0,0 +1,36 @@ +use crate::test_utils::TestRandom; +use crate::{BuilderPendingWithdrawal, ForkName}; +use context_deserialize::context_deserialize; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + Debug, + PartialEq, + Eq, + Hash, + Clone, + Default, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[context_deserialize(ForkName)] +pub struct BuilderPendingPayment { + #[serde(with = "serde_utils::quoted_u64")] + pub weight: u64, + pub withdrawal: BuilderPendingWithdrawal, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(BuilderPendingPayment); +} diff --git a/consensus/types/src/builder/builder_pending_withdrawal.rs b/consensus/types/src/builder/builder_pending_withdrawal.rs new file mode 100644 index 00000000000..436d331c003 --- /dev/null +++ b/consensus/types/src/builder/builder_pending_withdrawal.rs @@ -0,0 +1,40 @@ +use crate::test_utils::TestRandom; +use crate::{Address, Epoch, ForkName}; +use context_deserialize::context_deserialize; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + Debug, + PartialEq, + Eq, + Hash, + Clone, + Default, + Serialize, + Deserialize, + Encode, + Decode, + TreeHash, + TestRandom, +)] +#[cfg_attr(feature = "arbitrary", 
derive(arbitrary::Arbitrary))] +#[context_deserialize(ForkName)] +pub struct BuilderPendingWithdrawal { + #[serde(with = "serde_utils::address_hex")] + pub fee_recipient: Address, + #[serde(with = "serde_utils::quoted_u64")] + pub amount: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_index: u64, + pub withdrawable_epoch: Epoch, +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(BuilderPendingWithdrawal); +} diff --git a/consensus/types/src/builder/mod.rs b/consensus/types/src/builder/mod.rs new file mode 100644 index 00000000000..54d0ae4eb73 --- /dev/null +++ b/consensus/types/src/builder/mod.rs @@ -0,0 +1,10 @@ +mod builder_bid; +mod builder_pending_payment; +mod builder_pending_withdrawal; + +pub use builder_bid::{ + BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, + BuilderBidFulu, SignedBuilderBid, +}; +pub use builder_pending_payment::BuilderPendingPayment; +pub use builder_pending_withdrawal::BuilderPendingWithdrawal; diff --git a/consensus/types/src/consolidation_request.rs b/consensus/types/src/consolidation/consolidation_request.rs similarity index 84% rename from consensus/types/src/consolidation_request.rs rename to consensus/types/src/consolidation/consolidation_request.rs index 2af3426b68f..3f09517a903 100644 --- a/consensus/types/src/consolidation_request.rs +++ b/consensus/types/src/consolidation/consolidation_request.rs @@ -1,11 +1,17 @@ -use crate::context_deserialize; -use crate::{Address, ForkName, PublicKeyBytes, SignedRoot, test_utils::TestRandom}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, 
Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/consolidation/mod.rs b/consensus/types/src/consolidation/mod.rs new file mode 100644 index 00000000000..a6a2f4a3317 --- /dev/null +++ b/consensus/types/src/consolidation/mod.rs @@ -0,0 +1,5 @@ +mod consolidation_request; +mod pending_consolidation; + +pub use consolidation_request::ConsolidationRequest; +pub use pending_consolidation::PendingConsolidation; diff --git a/consensus/types/src/pending_consolidation.rs b/consensus/types/src/consolidation/pending_consolidation.rs similarity index 86% rename from consensus/types/src/pending_consolidation.rs rename to consensus/types/src/consolidation/pending_consolidation.rs index 9fb8c3566db..fcd76e43b65 100644 --- a/consensus/types/src/pending_consolidation.rs +++ b/consensus/types/src/consolidation/pending_consolidation.rs @@ -1,11 +1,11 @@ -use crate::ForkName; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/application_domain.rs b/consensus/types/src/core/application_domain.rs similarity index 100% rename from consensus/types/src/application_domain.rs rename to consensus/types/src/core/application_domain.rs diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/core/chain_spec.rs similarity index 98% rename from consensus/types/src/chain_spec.rs rename to consensus/types/src/core/chain_spec.rs index 3565c714e06..da3f9b90ccc 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/core/chain_spec.rs @@ -1,19 +1,27 @@ -use 
crate::application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; -use crate::blob_sidecar::BlobIdentifier; -use crate::data_column_sidecar::DataColumnsByRootIdentifier; -use crate::*; -use derivative::Derivative; +use std::{fs::File, path::Path, time::Duration}; + +use educe::Educe; use ethereum_hashing::hash; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_utils::quoted_u64::MaybeQuoted; use ssz::Encode; -use std::fs::File; -use std::path::Path; -use std::time::Duration; +use ssz_types::{RuntimeVariableList, VariableList}; use tree_hash::TreeHash; +use crate::{ + core::{ + APPLICATION_DOMAIN_BUILDER, Address, ApplicationDomain, EnrForkId, Epoch, EthSpec, + EthSpecId, Hash256, MainnetEthSpec, Slot, Uint256, + }, + data::{BlobIdentifier, DataColumnSubnetId, DataColumnsByRootIdentifier}, + execution::ExecutionBlockHash, + fork::{Fork, ForkData, ForkName}, + state::BeaconState, +}; + /// Each of the BLS signature domains. #[derive(Debug, PartialEq, Clone, Copy)] pub enum Domain { @@ -28,6 +36,8 @@ pub enum Domain { SyncCommittee, ContributionAndProof, SyncCommitteeSelectionProof, + BeaconBuilder, + PTCAttester, ApplicationMask(ApplicationDomain), } @@ -81,6 +91,7 @@ pub struct ChainSpec { pub bls_withdrawal_prefix_byte: u8, pub eth1_address_withdrawal_prefix_byte: u8, pub compounding_withdrawal_prefix_byte: u8, + pub builder_withdrawal_prefix_byte: u8, /* * Time parameters @@ -119,6 +130,8 @@ pub struct ChainSpec { pub(crate) domain_voluntary_exit: u32, pub(crate) domain_selection_proof: u32, pub(crate) domain_aggregate_and_proof: u32, + pub(crate) domain_beacon_builder: u32, + pub(crate) domain_ptc_attester: u32, /* * Fork choice @@ -220,6 +233,8 @@ pub struct ChainSpec { pub gloas_fork_version: [u8; 4], /// The Gloas fork epoch is optional, with `None` representing "Gloas never happens". 
pub gloas_fork_epoch: Option, + pub builder_payment_threshold_numerator: u64, + pub builder_payment_threshold_denominator: u64, /* * Networking @@ -527,6 +542,8 @@ impl ChainSpec { Domain::VoluntaryExit => self.domain_voluntary_exit, Domain::SelectionProof => self.domain_selection_proof, Domain::AggregateAndProof => self.domain_aggregate_and_proof, + Domain::BeaconBuilder => self.domain_beacon_builder, + Domain::PTCAttester => self.domain_ptc_attester, Domain::SyncCommittee => self.domain_sync_committee, Domain::ContributionAndProof => self.domain_contribution_and_proof, Domain::SyncCommitteeSelectionProof => self.domain_sync_committee_selection_proof, @@ -964,6 +981,7 @@ impl ChainSpec { bls_withdrawal_prefix_byte: 0x00, eth1_address_withdrawal_prefix_byte: 0x01, compounding_withdrawal_prefix_byte: 0x02, + builder_withdrawal_prefix_byte: 0x03, /* * Time parameters @@ -1003,6 +1021,8 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_beacon_builder: 0x1B, + domain_ptc_attester: 0x0C, /* * Fork choice @@ -1123,6 +1143,8 @@ impl ChainSpec { */ gloas_fork_version: [0x07, 0x00, 0x00, 0x00], gloas_fork_epoch: None, + builder_payment_threshold_numerator: 6, + builder_payment_threshold_denominator: 10, /* * Network specific @@ -1325,6 +1347,7 @@ impl ChainSpec { bls_withdrawal_prefix_byte: 0x00, eth1_address_withdrawal_prefix_byte: 0x01, compounding_withdrawal_prefix_byte: 0x02, + builder_withdrawal_prefix_byte: 0x03, /* * Time parameters @@ -1364,6 +1387,8 @@ impl ChainSpec { domain_voluntary_exit: 4, domain_selection_proof: 5, domain_aggregate_and_proof: 6, + domain_beacon_builder: 0x1B, + domain_ptc_attester: 0x0C, /* * Fork choice @@ -1483,6 +1508,8 @@ impl ChainSpec { */ gloas_fork_version: [0x07, 0x00, 0x00, 0x64], gloas_fork_epoch: None, + builder_payment_threshold_numerator: 6, + builder_payment_threshold_denominator: 10, /* * Network specific @@ -1566,15 +1593,15 @@ pub struct BlobParameters { // A 
wrapper around a vector of BlobParameters to ensure that the vector is reverse // sorted by epoch. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive(Debug, Derivative, Clone)] -#[derivative(PartialEq)] +#[derive(Debug, Educe, Clone)] +#[educe(PartialEq)] pub struct BlobSchedule { schedule: Vec, // This is a hack to prevent the blob schedule being serialized on the /eth/v1/config/spec // endpoint prior to the Fulu fork being scheduled. // // We can remove this once Fulu is live on mainnet. - #[derivative(PartialEq = "ignore")] + #[educe(PartialEq(ignore))] skip_serializing: bool, } @@ -2093,7 +2120,7 @@ fn max_data_columns_by_root_request_common(max_request_blocks: u64) let empty_data_columns_by_root_id = DataColumnsByRootIdentifier { block_root: Hash256::zero(), - columns: VariableList::from(vec![0; E::number_of_columns()]), + columns: VariableList::repeat_full(0), }; RuntimeVariableList::>::new( @@ -2509,6 +2536,8 @@ mod tests { &spec, ); test_domain(Domain::SyncCommittee, spec.domain_sync_committee, &spec); + test_domain(Domain::BeaconBuilder, spec.domain_beacon_builder, &spec); + test_domain(Domain::PTCAttester, spec.domain_ptc_attester, &spec); // The builder domain index is zero let builder_domain_pre_mask = [0; 4]; @@ -2581,6 +2610,7 @@ mod tests { #[cfg(test)] mod yaml_tests { use super::*; + use crate::core::MinimalEthSpec; use paste::paste; use std::sync::Arc; use tempfile::NamedTempFile; diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/core/config_and_preset.rs similarity index 95% rename from consensus/types/src/config_and_preset.rs rename to consensus/types/src/core/config_and_preset.rs index 16b09c9c088..08141c77311 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/core/config_and_preset.rs @@ -1,13 +1,14 @@ -use crate::{ - AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, - ElectraPreset, EthSpec, FuluPreset, GloasPreset, 
consts::altair, consts::deneb, -}; use maplit::hashmap; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::collections::HashMap; use superstruct::superstruct; +use crate::core::{ + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, ChainSpec, Config, DenebPreset, + ElectraPreset, EthSpec, FuluPreset, GloasPreset, consts, +}; + /// Fusion of a runtime-config with the compile-time preset values. /// /// Mostly useful for the API. @@ -131,11 +132,11 @@ pub fn get_extra_fields(spec: &ChainSpec) -> HashMap { "domain_sync_committee_selection_proof".to_uppercase() => u32_hex(spec.domain_sync_committee_selection_proof), "sync_committee_subnet_count".to_uppercase() => - altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), + consts::altair::SYNC_COMMITTEE_SUBNET_COUNT.to_string().into(), "target_aggregators_per_sync_subcommittee".to_uppercase() => - altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), + consts::altair::TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE.to_string().into(), // Deneb - "versioned_hash_version_kzg".to_uppercase() => deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), + "versioned_hash_version_kzg".to_uppercase() => consts::deneb::VERSIONED_HASH_VERSION_KZG.to_string().into(), // Electra "compounding_withdrawal_prefix".to_uppercase() => u8_hex(spec.compounding_withdrawal_prefix_byte), "unset_deposit_requests_start_index".to_uppercase() => spec.unset_deposit_requests_start_index.to_string().into(), diff --git a/consensus/types/src/consts.rs b/consensus/types/src/core/consts.rs similarity index 94% rename from consensus/types/src/consts.rs rename to consensus/types/src/core/consts.rs index c20d5fe8f33..b6d63c47a88 100644 --- a/consensus/types/src/consts.rs +++ b/consensus/types/src/core/consts.rs @@ -23,5 +23,5 @@ pub mod bellatrix { pub const INTERVALS_PER_SLOT: u64 = 3; } pub mod deneb { - pub use crate::VERSIONED_HASH_VERSION_KZG; + pub use kzg::VERSIONED_HASH_VERSION_KZG; } diff --git 
a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/core/enr_fork_id.rs similarity index 95% rename from consensus/types/src/enr_fork_id.rs rename to consensus/types/src/core/enr_fork_id.rs index e22672aeb60..c3b400cd136 100644 --- a/consensus/types/src/enr_fork_id.rs +++ b/consensus/types/src/core/enr_fork_id.rs @@ -1,11 +1,10 @@ -use crate::Epoch; -use crate::test_utils::TestRandom; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, test_utils::TestRandom}; + /// Specifies a fork which allows nodes to identify each other on the network. This fork is used in /// a nodes local ENR. /// diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/core/eth_spec.rs similarity index 91% rename from consensus/types/src/eth_spec.rs rename to consensus/types/src/core/eth_spec.rs index e001cf0e4e9..74795fdfc31 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/core/eth_spec.rs @@ -1,16 +1,22 @@ -use crate::*; +use std::{ + fmt::{self, Debug}, + str::FromStr, +}; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use ssz_types::typenum::{ +use typenum::{ U0, U1, U2, U4, U8, U16, U17, U32, U64, U128, U256, U512, U625, U1024, U2048, U4096, U8192, U65536, U131072, U262144, U1048576, U16777216, U33554432, U134217728, U1073741824, - U1099511627776, UInt, bit::B0, + U1099511627776, UInt, Unsigned, bit::B0, +}; + +use crate::{ + core::{ChainSpec, Epoch}, + state::BeaconStateError, }; -use std::fmt::{self, Debug}; -use std::str::FromStr; -pub type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 +type U5000 = UInt, B0>, B0>; // 625 * 8 = 5000 const MAINNET: &str = "mainnet"; const MINIMAL: &str = "minimal"; @@ -165,6 +171,14 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + type MaxWithdrawalRequestsPerPayload: Unsigned + Clone + Sync + Send + Debug + PartialEq; type MaxPendingDepositsPerEpoch: 
Unsigned + Clone + Sync + Send + Debug + PartialEq; + /* + * New in Gloas + */ + type PTCSize: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type MaxPayloadAttestations: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type BuilderPendingPaymentsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + type BuilderPendingWithdrawalsLimit: Unsigned + Clone + Sync + Send + Debug + PartialEq; + fn default_spec() -> ChainSpec; fn spec_name() -> EthSpecId; @@ -182,7 +196,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn get_committee_count_per_slot( active_validator_count: usize, spec: &ChainSpec, - ) -> Result { + ) -> Result { Self::get_committee_count_per_slot_with( active_validator_count, spec.max_committees_per_slot, @@ -194,7 +208,7 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + active_validator_count: usize, max_committees_per_slot: usize, target_committee_size: usize, - ) -> Result { + ) -> Result { let slots_per_epoch = Self::SlotsPerEpoch::to_usize(); Ok(std::cmp::max( @@ -310,6 +324,11 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Self::BytesPerBlob::to_usize() } + /// Returns the `BYTES_PER_CELL` constant for this specification. + fn bytes_per_cell() -> usize { + Self::BytesPerCell::to_usize() + } + /// Returns the `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` preset for this specification. fn kzg_proof_inclusion_proof_depth() -> usize { Self::KzgCommitmentInclusionProofDepth::to_usize() @@ -346,6 +365,16 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + Self::PendingConsolidationsLimit::to_usize() } + /// Returns the `BUILDER_PENDING_PAYMENTS_LIMIT` constant for this specification. + fn builder_pending_payments_limit() -> usize { + Self::BuilderPendingPaymentsLimit::to_usize() + } + + /// Returns the `BUILDER_PENDING_WITHDRAWALS_LIMIT` constant for this specification. 
+ fn builder_pending_withdrawals_limit() -> usize { + Self::BuilderPendingWithdrawalsLimit::to_usize() + } + /// Returns the `MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD` constant for this specification. fn max_consolidation_requests_per_payload() -> usize { Self::MaxConsolidationRequestsPerPayload::to_usize() @@ -391,6 +420,16 @@ pub trait EthSpec: 'static + Default + Sync + Send + Clone + Debug + PartialEq + fn proposer_lookahead_slots() -> usize { Self::ProposerLookaheadSlots::to_usize() } + + /// Returns the `PTCSize` constant for this specification. + fn ptc_size() -> usize { + Self::PTCSize::to_usize() + } + + /// Returns the `MaxPayloadAttestations` constant for this specification. + fn max_payload_attestations() -> usize { + Self::MaxPayloadAttestations::to_usize() + } } /// Macro to inherit some type values from another EthSpec. @@ -420,6 +459,8 @@ impl EthSpec for MainnetEthSpec { type EpochsPerSlashingsVector = U8192; type HistoricalRootsLimit = U16777216; type ValidatorRegistryLimit = U1099511627776; + type BuilderPendingPaymentsLimit = U64; // 2 * SLOTS_PER_EPOCH = 2 * 32 = 64 + type BuilderPendingWithdrawalsLimit = U1048576; type MaxProposerSlashings = U16; type MaxAttesterSlashings = U2; type MaxAttestations = U128; @@ -460,6 +501,8 @@ impl EthSpec for MainnetEthSpec { type MaxAttestationsElectra = U8; type MaxWithdrawalRequestsPerPayload = U16; type MaxPendingDepositsPerEpoch = U16; + type PTCSize = U512; + type MaxPayloadAttestations = U4; fn default_spec() -> ChainSpec { ChainSpec::mainnet() @@ -502,6 +545,7 @@ impl EthSpec for MinimalEthSpec { type CellsPerExtBlob = U128; type NumberOfColumns = U128; type ProposerLookaheadSlots = U16; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH + type BuilderPendingPaymentsLimit = U16; // 2 * SLOTS_PER_EPOCH = 2 * 8 = 16 params_from_eth_spec!(MainnetEthSpec { JustificationBitsLength, @@ -511,6 +555,7 @@ impl EthSpec for MinimalEthSpec { GenesisEpoch, HistoricalRootsLimit, ValidatorRegistryLimit, + 
BuilderPendingWithdrawalsLimit, MaxProposerSlashings, MaxAttesterSlashings, MaxAttestations, @@ -530,7 +575,9 @@ impl EthSpec for MinimalEthSpec { MaxAttesterSlashingsElectra, MaxAttestationsElectra, MaxDepositRequestsPerPayload, - MaxWithdrawalRequestsPerPayload + MaxWithdrawalRequestsPerPayload, + PTCSize, + MaxPayloadAttestations }); fn default_spec() -> ChainSpec { @@ -561,6 +608,8 @@ impl EthSpec for GnosisEthSpec { type EpochsPerSlashingsVector = U8192; type HistoricalRootsLimit = U16777216; type ValidatorRegistryLimit = U1099511627776; + type BuilderPendingPaymentsLimit = U32; // 2 * SLOTS_PER_EPOCH = 2 * 16 = 32 + type BuilderPendingWithdrawalsLimit = U1048576; type MaxProposerSlashings = U16; type MaxAttesterSlashings = U2; type MaxAttestations = U128; @@ -601,6 +650,8 @@ impl EthSpec for GnosisEthSpec { type CellsPerExtBlob = U128; type NumberOfColumns = U128; type ProposerLookaheadSlots = U32; // Derived from (MIN_SEED_LOOKAHEAD + 1) * SLOTS_PER_EPOCH + type PTCSize = U512; + type MaxPayloadAttestations = U2; fn default_spec() -> ChainSpec { ChainSpec::gnosis() @@ -614,7 +665,7 @@ impl EthSpec for GnosisEthSpec { #[cfg(test)] mod test { use crate::{EthSpec, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; - use ssz_types::typenum::Unsigned; + use typenum::Unsigned; fn assert_valid_spec() { let spec = E::default_spec(); diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/core/graffiti.rs similarity index 98% rename from consensus/types/src/graffiti.rs rename to consensus/types/src/core/graffiti.rs index 31cc4187a67..d0e0e1b1a89 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/core/graffiti.rs @@ -1,14 +1,13 @@ -use crate::{ - Hash256, - test_utils::{RngCore, TestRandom}, -}; +use std::{fmt, str::FromStr}; + +use rand::RngCore; use regex::bytes::Regex; use serde::{Deserialize, Deserializer, Serialize, Serializer, de::Error}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; -use std::str::FromStr; use 
tree_hash::{PackedEncoding, TreeHash}; +use crate::{core::Hash256, test_utils::TestRandom}; + pub const GRAFFITI_BYTES_LEN: usize = 32; /// The 32-byte `graffiti` field on a beacon block. diff --git a/consensus/types/src/core/mod.rs b/consensus/types/src/core/mod.rs new file mode 100644 index 00000000000..bb50bb18568 --- /dev/null +++ b/consensus/types/src/core/mod.rs @@ -0,0 +1,44 @@ +pub mod consts; + +mod application_domain; +mod chain_spec; +mod config_and_preset; +mod enr_fork_id; +mod eth_spec; +mod graffiti; +mod non_zero_usize; +mod preset; +mod relative_epoch; +mod signing_data; +mod slot_data; +#[macro_use] +mod slot_epoch_macros; +mod slot_epoch; +#[cfg(feature = "sqlite")] +mod sqlite; + +pub use application_domain::{APPLICATION_DOMAIN_BUILDER, ApplicationDomain}; +pub use chain_spec::{BlobParameters, BlobSchedule, ChainSpec, Config, Domain}; +pub use config_and_preset::{ + ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, + ConfigAndPresetGloas, get_extra_fields, +}; +pub use enr_fork_id::EnrForkId; +pub use eth_spec::{EthSpec, EthSpecId, GNOSIS, GnosisEthSpec, MainnetEthSpec, MinimalEthSpec}; +pub use graffiti::{GRAFFITI_BYTES_LEN, Graffiti, GraffitiString}; +pub use non_zero_usize::new_non_zero_usize; +pub use preset::{ + AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, + FuluPreset, GloasPreset, +}; +pub use relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; +pub use signing_data::{SignedRoot, SigningData}; +pub use slot_data::SlotData; +pub use slot_epoch::{Epoch, Slot}; + +pub type Hash256 = alloy_primitives::B256; +pub type Uint256 = alloy_primitives::U256; +pub type Hash64 = alloy_primitives::B64; +pub type Address = alloy_primitives::Address; +pub type VersionedHash = Hash256; +pub type MerkleProof = Vec; diff --git a/consensus/types/src/non_zero_usize.rs b/consensus/types/src/core/non_zero_usize.rs similarity index 100% rename from 
consensus/types/src/non_zero_usize.rs rename to consensus/types/src/core/non_zero_usize.rs diff --git a/consensus/types/src/preset.rs b/consensus/types/src/core/preset.rs similarity index 99% rename from consensus/types/src/preset.rs rename to consensus/types/src/core/preset.rs index ab54c0345f7..75d2d8df6b3 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/core/preset.rs @@ -1,5 +1,7 @@ -use crate::{ChainSpec, Epoch, EthSpec, Unsigned}; use serde::{Deserialize, Serialize}; +use typenum::Unsigned; + +use crate::core::{ChainSpec, Epoch, EthSpec}; /// Value-level representation of an Ethereum consensus "preset". /// diff --git a/consensus/types/src/relative_epoch.rs b/consensus/types/src/core/relative_epoch.rs similarity index 99% rename from consensus/types/src/relative_epoch.rs rename to consensus/types/src/core/relative_epoch.rs index 2fa0ae41bda..d1ee7ecc7c6 100644 --- a/consensus/types/src/relative_epoch.rs +++ b/consensus/types/src/core/relative_epoch.rs @@ -1,6 +1,7 @@ -use crate::*; use safe_arith::{ArithError, SafeArith}; +use crate::core::{Epoch, Slot}; + #[derive(Debug, PartialEq, Clone, Copy)] pub enum Error { EpochTooLow { base: Epoch, other: Epoch }, diff --git a/consensus/types/src/signing_data.rs b/consensus/types/src/core/signing_data.rs similarity index 85% rename from consensus/types/src/signing_data.rs rename to consensus/types/src/core/signing_data.rs index 69b7dabfe5a..907f03fac7b 100644 --- a/consensus/types/src/signing_data.rs +++ b/consensus/types/src/core/signing_data.rs @@ -1,13 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256}; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", 
derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] #[context_deserialize(ForkName)] diff --git a/consensus/types/src/slot_data.rs b/consensus/types/src/core/slot_data.rs similarity index 92% rename from consensus/types/src/slot_data.rs rename to consensus/types/src/core/slot_data.rs index 19775913b98..f0bd01814f2 100644 --- a/consensus/types/src/slot_data.rs +++ b/consensus/types/src/core/slot_data.rs @@ -1,4 +1,4 @@ -use crate::Slot; +use crate::core::Slot; /// A trait providing a `Slot` getter for messages that are related to a single slot. Useful in /// making parts of attestation and sync committee processing generic. diff --git a/consensus/types/src/slot_epoch.rs b/consensus/types/src/core/slot_epoch.rs similarity index 98% rename from consensus/types/src/slot_epoch.rs rename to consensus/types/src/core/slot_epoch.rs index 05af9c5232d..97457701b11 100644 --- a/consensus/types/src/slot_epoch.rs +++ b/consensus/types/src/core/slot_epoch.rs @@ -10,15 +10,17 @@ //! implement `Into`, however this would allow operations between `Slots` and `Epochs` which //! may lead to programming errors which are not detected by the compiler. 
-use crate::test_utils::TestRandom; -use crate::{ChainSpec, SignedRoot}; +use std::{fmt, hash::Hash}; use rand::RngCore; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; -use std::hash::Hash; + +use crate::{ + core::{ChainSpec, SignedRoot}, + test_utils::TestRandom, +}; #[cfg(feature = "legacy-arith")] use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, Sub, SubAssign}; diff --git a/consensus/types/src/slot_epoch_macros.rs b/consensus/types/src/core/slot_epoch_macros.rs similarity index 100% rename from consensus/types/src/slot_epoch_macros.rs rename to consensus/types/src/core/slot_epoch_macros.rs diff --git a/consensus/types/src/sqlite.rs b/consensus/types/src/core/sqlite.rs similarity index 96% rename from consensus/types/src/sqlite.rs rename to consensus/types/src/core/sqlite.rs index b6318dc4ce5..de892b4e98f 100644 --- a/consensus/types/src/sqlite.rs +++ b/consensus/types/src/core/sqlite.rs @@ -1,10 +1,11 @@ //! Implementations of SQLite compatibility traits. -use crate::{Epoch, Slot}; use rusqlite::{ Error, types::{FromSql, FromSqlError, ToSql, ToSqlOutput, ValueRef}, }; +use crate::core::{Epoch, Slot}; + macro_rules! 
impl_to_from_sql { ($type:ty) => { impl ToSql for $type { diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/data/blob_sidecar.rs similarity index 93% rename from consensus/types/src/blob_sidecar.rs rename to consensus/types/src/data/blob_sidecar.rs index 2e8c2578976..709e556933b 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/data/blob_sidecar.rs @@ -1,13 +1,8 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - AbstractExecPayload, BeaconBlockHeader, BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, - FixedVector, ForkName, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, - SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, - beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, -}; +use std::{fmt::Debug, hash::Hash, sync::Arc}; + use bls::Signature; -use derivative::Derivative; +use context_deserialize::context_deserialize; +use educe::Educe; use kzg::{BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT, Blob as KzgBlob, Kzg, KzgCommitment, KzgProof}; use merkle_proof::{MerkleTreeError, merkle_root_from_branch, verify_merkle_proof}; use rand::Rng; @@ -15,13 +10,24 @@ use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use std::fmt::Debug; -use std::hash::Hash; -use std::sync::Arc; +use ssz_types::{FixedVector, RuntimeFixedVector, RuntimeVariableList, VariableList}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{ + BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlock, SignedBeaconBlockHeader, + }, + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + data::Blob, + execution::AbstractExecPayload, + fork::ForkName, + kzg_ext::KzgProofs, + state::BeaconStateError, + test_utils::TestRandom, +}; + /// Container of the data that identifies an individual blob. 
#[derive( Serialize, Deserialize, Encode, Decode, TreeHash, Copy, Clone, Debug, PartialEq, Eq, Hash, @@ -49,12 +55,10 @@ impl Ord for BlobIdentifier { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, -)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Educe)] #[context_deserialize(ForkName)] #[serde(bound = "E: EthSpec")] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] pub struct BlobSidecar { #[serde(with = "serde_utils::quoted_u64")] pub index: u64, diff --git a/consensus/types/src/data_column_custody_group.rs b/consensus/types/src/data/data_column_custody_group.rs similarity index 98% rename from consensus/types/src/data_column_custody_group.rs rename to consensus/types/src/data/data_column_custody_group.rs index 7ecabab0abc..d96d13cfff6 100644 --- a/consensus/types/src/data_column_custody_group.rs +++ b/consensus/types/src/data/data_column_custody_group.rs @@ -1,8 +1,14 @@ -use crate::{ChainSpec, ColumnIndex, DataColumnSubnetId, EthSpec}; +use std::collections::HashSet; + use alloy_primitives::U256; use itertools::Itertools; use safe_arith::{ArithError, SafeArith}; -use std::collections::HashSet; + +use crate::{ + EthSpec, + core::ChainSpec, + data::{ColumnIndex, DataColumnSubnetId}, +}; pub type CustodyIndex = u64; diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data/data_column_sidecar.rs similarity index 91% rename from consensus/types/src/data_column_sidecar.rs rename to consensus/types/src/data/data_column_sidecar.rs index 2272b1695c9..71d821f83ef 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data/data_column_sidecar.rs @@ -1,13 +1,8 @@ -use crate::beacon_block_body::{BLOB_KZG_COMMITMENTS_INDEX, KzgCommitments}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; 
-use crate::{ - BeaconBlockHeader, BeaconStateError, Epoch, EthSpec, ForkName, Hash256, - SignedBeaconBlockHeader, Slot, -}; +use std::sync::Arc; + use bls::Signature; -use derivative::Derivative; -use kzg::Error as KzgError; +use context_deserialize::context_deserialize; +use educe::Educe; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; use safe_arith::ArithError; @@ -16,11 +11,19 @@ use ssz::Encode; use ssz_derive::{Decode, Encode}; use ssz_types::Error as SszError; use ssz_types::{FixedVector, VariableList}; -use std::sync::Arc; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + block::{BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, SignedBeaconBlockHeader}, + core::{Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, + kzg_ext::{KzgCommitments, KzgError}, + state::BeaconStateError, + test_utils::TestRandom, +}; + pub type ColumnIndex = u64; pub type Cell = FixedVector::BytesPerCell>; pub type DataColumn = VariableList, ::MaxBlobCommitmentsPerBlock>; @@ -40,11 +43,9 @@ pub type DataColumnSidecarList = Vec>>; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive( - Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Derivative, -)] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Educe)] #[serde(bound = "E: EthSpec")] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[context_deserialize(ForkName)] pub struct DataColumnSidecar { #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/data_column_subnet_id.rs b/consensus/types/src/data/data_column_subnet_id.rs similarity index 73% rename from consensus/types/src/data_column_subnet_id.rs rename to consensus/types/src/data/data_column_subnet_id.rs index 4061cb4fdb0..c30ebbba20e 100644 --- a/consensus/types/src/data_column_subnet_id.rs +++ 
b/consensus/types/src/data/data_column_subnet_id.rs @@ -1,18 +1,25 @@ //! Identifies each data column subnet by an integer identifier. -use crate::ChainSpec; -use crate::data_column_sidecar::ColumnIndex; -use derivative::Derivative; -use safe_arith::{ArithError, SafeArith}; +use std::{ + fmt::{self, Display}, + ops::{Deref, DerefMut}, +}; + +use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; -use std::fmt::{self, Display}; -use std::ops::{Deref, DerefMut}; + +use crate::{core::ChainSpec, data::ColumnIndex}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive(Clone, Copy, Derivative, PartialEq, Eq, Hash, Serialize, Deserialize)] -#[derivative(Debug = "transparent")] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(transparent)] pub struct DataColumnSubnetId(#[serde(with = "serde_utils::quoted_u64")] u64); +impl fmt::Debug for DataColumnSubnetId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + std::fmt::Debug::fmt(&self.0, f) + } +} + impl DataColumnSubnetId { pub fn new(id: u64) -> Self { id.into() @@ -65,15 +72,3 @@ impl From<&DataColumnSubnetId> for u64 { val.0 } } - -#[derive(Debug)] -pub enum Error { - ArithError(ArithError), - InvalidCustodySubnetCount(u64), -} - -impl From for Error { - fn from(e: ArithError) -> Self { - Error::ArithError(e) - } -} diff --git a/consensus/types/src/data/mod.rs b/consensus/types/src/data/mod.rs new file mode 100644 index 00000000000..10d062bada9 --- /dev/null +++ b/consensus/types/src/data/mod.rs @@ -0,0 +1,23 @@ +mod blob_sidecar; +mod data_column_custody_group; +mod data_column_sidecar; +mod data_column_subnet_id; + +pub use blob_sidecar::{ + BlobIdentifier, BlobSidecar, BlobSidecarError, BlobSidecarList, BlobsList, FixedBlobSidecarList, +}; +pub use data_column_custody_group::{ + CustodyIndex, DataColumnCustodyGroupError, compute_columns_for_custody_group, + compute_ordered_custody_column_indices, compute_subnets_for_node, + 
compute_subnets_from_custody_group, get_custody_groups, +}; +pub use data_column_sidecar::{ + Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError, + DataColumnSidecarList, DataColumnsByRootIdentifier, +}; +pub use data_column_subnet_id::DataColumnSubnetId; + +use crate::core::EthSpec; +use ssz_types::FixedVector; + +pub type Blob = FixedVector::BytesPerBlob>; diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit/deposit.rs similarity index 78% rename from consensus/types/src/deposit.rs rename to consensus/types/src/deposit/deposit.rs index 724f3de2f07..0b08bd6509f 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit/deposit.rs @@ -1,11 +1,12 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::*; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::U33; +use ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use typenum::U33; + +use crate::{core::Hash256, deposit::DepositData, fork::ForkName, test_utils::TestRandom}; pub const DEPOSIT_TREE_DEPTH: usize = 32; diff --git a/consensus/types/src/deposit_data.rs b/consensus/types/src/deposit/deposit_data.rs similarity index 86% rename from consensus/types/src/deposit_data.rs rename to consensus/types/src/deposit/deposit_data.rs index 3d9ae128088..51697f5d1a2 100644 --- a/consensus/types/src/deposit_data.rs +++ b/consensus/types/src/deposit/deposit_data.rs @@ -1,10 +1,17 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SecretKey, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Hash256, SignedRoot}, + deposit::DepositMessage, + fork::ForkName, + test_utils::TestRandom, +}; + /// 
The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 diff --git a/consensus/types/src/deposit_message.rs b/consensus/types/src/deposit/deposit_message.rs similarity index 81% rename from consensus/types/src/deposit_message.rs rename to consensus/types/src/deposit/deposit_message.rs index 9fe3b878858..4495a5c0236 100644 --- a/consensus/types/src/deposit_message.rs +++ b/consensus/types/src/deposit/deposit_message.rs @@ -1,11 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + /// The data supplied by the user to the deposit contract. /// /// Spec v0.12.1 diff --git a/consensus/types/src/deposit_request.rs b/consensus/types/src/deposit/deposit_request.rs similarity index 86% rename from consensus/types/src/deposit_request.rs rename to consensus/types/src/deposit/deposit_request.rs index 16acfb3b443..8d3c6e88bae 100644 --- a/consensus/types/src/deposit_request.rs +++ b/consensus/types/src/deposit/deposit_request.rs @@ -1,13 +1,13 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256, PublicKeyBytes}; -use bls::SignatureBytes; +use bls::{PublicKeyBytes, SignatureBytes}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/deposit_tree_snapshot.rs 
b/consensus/types/src/deposit/deposit_tree_snapshot.rs similarity index 95% rename from consensus/types/src/deposit_tree_snapshot.rs rename to consensus/types/src/deposit/deposit_tree_snapshot.rs index 400fca217da..24f41397a0a 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit/deposit_tree_snapshot.rs @@ -1,10 +1,11 @@ -use crate::*; use ethereum_hashing::{ZERO_HASHES, hash32_concat}; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::int_to_bytes32; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; -use test_utils::TestRandom; + +use crate::{core::Hash256, deposit::DEPOSIT_TREE_DEPTH, test_utils::TestRandom}; #[derive(Encode, Decode, Deserialize, Serialize, Clone, Debug, PartialEq, TestRandom)] pub struct FinalizedExecutionBlock { diff --git a/consensus/types/src/deposit/mod.rs b/consensus/types/src/deposit/mod.rs new file mode 100644 index 00000000000..ff80f65cdb3 --- /dev/null +++ b/consensus/types/src/deposit/mod.rs @@ -0,0 +1,13 @@ +mod deposit; +mod deposit_data; +mod deposit_message; +mod deposit_request; +mod deposit_tree_snapshot; +mod pending_deposit; + +pub use deposit::{DEPOSIT_TREE_DEPTH, Deposit}; +pub use deposit_data::DepositData; +pub use deposit_message::DepositMessage; +pub use deposit_request::DepositRequest; +pub use deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; +pub use pending_deposit::PendingDeposit; diff --git a/consensus/types/src/pending_deposit.rs b/consensus/types/src/deposit/pending_deposit.rs similarity index 78% rename from consensus/types/src/pending_deposit.rs rename to consensus/types/src/deposit/pending_deposit.rs index 4a921edd549..4c039af39cd 100644 --- a/consensus/types/src/pending_deposit.rs +++ b/consensus/types/src/deposit/pending_deposit.rs @@ -1,10 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SignatureBytes}; +use context_deserialize::context_deserialize; 
use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, Slot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/bls_to_execution_change.rs b/consensus/types/src/execution/bls_to_execution_change.rs similarity index 83% rename from consensus/types/src/bls_to_execution_change.rs rename to consensus/types/src/execution/bls_to_execution_change.rs index 72d737ac714..de14f1b4c5d 100644 --- a/consensus/types/src/bls_to_execution_change.rs +++ b/consensus/types/src/execution/bls_to_execution_change.rs @@ -1,10 +1,17 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::{PublicKeyBytes, SecretKey}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, ChainSpec, Domain, Hash256, SignedRoot}, + execution::SignedBlsToExecutionChange, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/execution/dumb_macros.rs b/consensus/types/src/execution/dumb_macros.rs new file mode 100644 index 00000000000..4eae416bb56 --- /dev/null +++ b/consensus/types/src/execution/dumb_macros.rs @@ -0,0 +1,108 @@ +// These would usually be created by superstuct but now there's no longer a 1:1 mapping between +// the variants for ExecutionPayload and the variants for +// - ExecutionPayloadHeader +// - FullPayload +// - BlindedPayload +// TODO(EIP-7732): get rid of this whole file and panics once the engine_api is 
refactored for ePBS + +#[macro_export] +macro_rules! map_execution_payload_into_full_payload { + ($value:expr, $f:expr) => { + match $value { + ExecutionPayload::Bellatrix(inner) => { + let f: fn(ExecutionPayloadBellatrix<_>, fn(_) -> _) -> _ = $f; + f(inner, FullPayload::Bellatrix) + } + ExecutionPayload::Capella(inner) => { + let f: fn(ExecutionPayloadCapella<_>, fn(_) -> _) -> _ = $f; + f(inner, FullPayload::Capella) + } + ExecutionPayload::Deneb(inner) => { + let f: fn(ExecutionPayloadDeneb<_>, fn(_) -> _) -> _ = $f; + f(inner, FullPayload::Deneb) + } + ExecutionPayload::Electra(inner) => { + let f: fn(ExecutionPayloadElectra<_>, fn(_) -> _) -> _ = $f; + f(inner, FullPayload::Electra) + } + ExecutionPayload::Fulu(inner) => { + let f: fn(ExecutionPayloadFulu<_>, fn(_) -> _) -> _ = $f; + f(inner, FullPayload::Fulu) + } + ExecutionPayload::Gloas(_) => panic!("FullPayload::Gloas does not exist!"), + } + }; +} + +#[macro_export] +macro_rules! map_execution_payload_into_blinded_payload { + ($value:expr, $f:expr) => { + match $value { + ExecutionPayload::Bellatrix(inner) => { + let f: fn(ExecutionPayloadBellatrix<_>, fn(_) -> _) -> _ = $f; + f(inner, BlindedPayload::Bellatrix) + } + ExecutionPayload::Capella(inner) => { + let f: fn(ExecutionPayloadCapella<_>, fn(_) -> _) -> _ = $f; + f(inner, BlindedPayload::Capella) + } + ExecutionPayload::Deneb(inner) => { + let f: fn(ExecutionPayloadDeneb<_>, fn(_) -> _) -> _ = $f; + f(inner, BlindedPayload::Deneb) + } + ExecutionPayload::Electra(inner) => { + let f: fn(ExecutionPayloadElectra<_>, fn(_) -> _) -> _ = $f; + f(inner, BlindedPayload::Electra) + } + ExecutionPayload::Fulu(inner) => { + let f: fn(ExecutionPayloadFulu<_>, fn(_) -> _) -> _ = $f; + f(inner, BlindedPayload::Fulu) + } + ExecutionPayload::Gloas(_) => panic!("BlindedPayload::Gloas does not exist!"), + } + }; +} + +#[macro_export] +macro_rules! 
map_execution_payload_ref_into_execution_payload_header { + (&$lifetime:tt _, $value:expr, $f:expr) => { + match $value { + ExecutionPayloadRef::Bellatrix(inner) => { + let f: fn( + &$lifetime ExecutionPayloadBellatrix<_>, + fn(_) -> _, + ) -> _ = $f; + f(inner, ExecutionPayloadHeader::Bellatrix) + } + ExecutionPayloadRef::Capella(inner) => { + let f: fn( + &$lifetime ExecutionPayloadCapella<_>, + fn(_) -> _, + ) -> _ = $f; + f(inner, ExecutionPayloadHeader::Capella) + } + ExecutionPayloadRef::Deneb(inner) => { + let f: fn( + &$lifetime ExecutionPayloadDeneb<_>, + fn(_) -> _, + ) -> _ = $f; + f(inner, ExecutionPayloadHeader::Deneb) + } + ExecutionPayloadRef::Electra(inner) => { + let f: fn( + &$lifetime ExecutionPayloadElectra<_>, + fn(_) -> _, + ) -> _ = $f; + f(inner, ExecutionPayloadHeader::Electra) + } + ExecutionPayloadRef::Fulu(inner) => { + let f: fn( + &$lifetime ExecutionPayloadFulu<_>, + fn(_) -> _, + ) -> _ = $f; + f(inner, ExecutionPayloadHeader::Fulu) + } + ExecutionPayloadRef::Gloas(_) => panic!("ExecutionPayloadHeader::Gloas does not exist!"), + } + } +} diff --git a/consensus/types/src/eth1_data.rs b/consensus/types/src/execution/eth1_data.rs similarity index 86% rename from consensus/types/src/eth1_data.rs rename to consensus/types/src/execution/eth1_data.rs index 800f3e25f94..89a4e634a66 100644 --- a/consensus/types/src/eth1_data.rs +++ b/consensus/types/src/execution/eth1_data.rs @@ -1,12 +1,11 @@ -use super::Hash256; -use crate::ForkName; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Hash256, fork::ForkName, test_utils::TestRandom}; + /// Contains data obtained from the Eth1 chain. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/execution_block_hash.rs b/consensus/types/src/execution/execution_block_hash.rs similarity index 91% rename from consensus/types/src/execution_block_hash.rs rename to consensus/types/src/execution/execution_block_hash.rs index d3065afbbb0..91c019ce040 100644 --- a/consensus/types/src/execution_block_hash.rs +++ b/consensus/types/src/execution/execution_block_hash.rs @@ -1,18 +1,23 @@ -use crate::FixedBytesExtended; -use crate::Hash256; -use crate::test_utils::TestRandom; -use derivative::Derivative; +use std::fmt; + +use fixed_bytes::FixedBytesExtended; use rand::RngCore; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode}; -use std::fmt; + +use crate::{core::Hash256, test_utils::TestRandom}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] -#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash, Derivative)] -#[derivative(Debug = "transparent")] +#[derive(Default, Clone, Copy, Serialize, Deserialize, Eq, PartialEq, Hash)] #[serde(transparent)] pub struct ExecutionBlockHash(#[serde(with = "serde_utils::b256_hex")] pub Hash256); +impl fmt::Debug for ExecutionBlockHash { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + std::fmt::Debug::fmt(&self.0, f) + } +} + impl ExecutionBlockHash { pub fn zero() -> Self { Self(Hash256::zero()) diff --git a/consensus/types/src/execution_block_header.rs b/consensus/types/src/execution/execution_block_header.rs similarity index 98% rename from consensus/types/src/execution_block_header.rs rename to consensus/types/src/execution/execution_block_header.rs index 02152adbf73..e596ba1831d 100644 --- a/consensus/types/src/execution_block_header.rs +++ b/consensus/types/src/execution/execution_block_header.rs @@ -17,10 +17,15 @@ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
-use crate::{Address, EthSpec, ExecutionPayloadRef, Hash64, Hash256, Uint256}; use alloy_rlp::RlpEncodable; +use fixed_bytes::Uint256; use metastruct::metastruct; +use crate::{ + core::{Address, EthSpec, Hash64, Hash256}, + execution::ExecutionPayloadRef, +}; + /// Execution block header as used for RLP encoding and Keccak hashing. /// /// Credit to Reth for the type definition. diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution/execution_payload.rs similarity index 88% rename from consensus/types/src/execution_payload.rs rename to consensus/types/src/execution/execution_payload.rs index 7a899e5f022..b2278c91667 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution/execution_payload.rs @@ -1,19 +1,29 @@ -use crate::{test_utils::TestRandom, *}; -use derivative::Derivative; +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; +use fixed_bytes::Uint256; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256}, + execution::ExecutionBlockHash, + fork::{ForkName, ForkVersionDecode}, + state::BeaconStateError, + test_utils::TestRandom, + withdrawal::Withdrawals, +}; + pub type Transaction = VariableList; pub type Transactions = VariableList< Transaction<::MaxBytesPerTransaction>, ::MaxTransactionsPerPayload, >; -pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; - #[superstruct( variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( @@ -27,10 +37,10 @@ pub type Withdrawals = VariableList::MaxWithdrawal Decode, TreeHash, TestRandom, - Derivative, + Educe, ), context_deserialize(ForkName), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), 
serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -38,18 +48,22 @@ pub type Withdrawals = VariableList::MaxWithdrawal arbitrary(bound = "E: EthSpec"), ), ), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - map_into(FullPayload, BlindedPayload), - map_ref_into(ExecutionPayloadHeader) + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec", untagged)] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] @@ -130,6 +144,7 @@ impl ForkVersionDecode for ExecutionPayload { impl ExecutionPayload { #[allow(clippy::arithmetic_side_effects)] /// Returns the maximum size of an execution payload. + /// TODO(EIP-7732): this seems to only exist for the Bellatrix fork, but Mark's branch has it for all the forks, i.e. 
max_execution_payload_eip7732_size pub fn max_execution_payload_bellatrix_size() -> usize { // Fixed part ExecutionPayloadBellatrix::::default().as_ssz_bytes().len() diff --git a/consensus/types/src/execution/execution_payload_bid.rs b/consensus/types/src/execution/execution_payload_bid.rs new file mode 100644 index 00000000000..20e461334d3 --- /dev/null +++ b/consensus/types/src/execution/execution_payload_bid.rs @@ -0,0 +1,40 @@ +use crate::test_utils::TestRandom; +use crate::{Address, ExecutionBlockHash, ForkName, Hash256, SignedRoot, Slot}; +use context_deserialize::context_deserialize; +use educe::Educe; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive( + Default, Debug, Clone, Serialize, Encode, Decode, Deserialize, TreeHash, Educe, TestRandom, +)] +#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[educe(PartialEq, Hash)] +#[context_deserialize(ForkName)] +// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#executionpayloadbid +pub struct ExecutionPayloadBid { + pub parent_block_hash: ExecutionBlockHash, + pub parent_block_root: Hash256, + pub block_hash: ExecutionBlockHash, + #[serde(with = "serde_utils::address_hex")] + pub fee_recipient: Address, + #[serde(with = "serde_utils::quoted_u64")] + pub gas_limit: u64, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_index: u64, + pub slot: Slot, + #[serde(with = "serde_utils::quoted_u64")] + pub value: u64, + pub blob_kzg_commitments_root: Hash256, +} + +impl SignedRoot for ExecutionPayloadBid {} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(ExecutionPayloadBid); +} diff --git a/consensus/types/src/execution/execution_payload_envelope.rs b/consensus/types/src/execution/execution_payload_envelope.rs new file mode 100644 index 00000000000..64e03cec5a9 --- /dev/null +++ 
b/consensus/types/src/execution/execution_payload_envelope.rs @@ -0,0 +1,36 @@ +use crate::test_utils::TestRandom; +use crate::{ + EthSpec, ExecutionPayloadGloas, ExecutionRequests, ForkName, Hash256, KzgCommitments, + SignedRoot, Slot, +}; +use context_deserialize::context_deserialize; +use educe::Educe; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive(Debug, Clone, Serialize, Encode, Decode, Deserialize, TestRandom, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] +#[context_deserialize(ForkName)] +#[serde(bound = "E: EthSpec")] +pub struct ExecutionPayloadEnvelope { + pub payload: ExecutionPayloadGloas, + pub execution_requests: ExecutionRequests, + #[serde(with = "serde_utils::quoted_u64")] + pub builder_index: u64, + pub beacon_block_root: Hash256, + pub slot: Slot, + pub blob_kzg_commitments: KzgCommitments, + pub state_root: Hash256, +} + +impl SignedRoot for ExecutionPayloadEnvelope {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_and_tree_hash_tests!(ExecutionPayloadEnvelope); +} diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution/execution_payload_header.rs similarity index 83% rename from consensus/types/src/execution_payload_header.rs rename to consensus/types/src/execution/execution_payload_header.rs index 2f5fac87a9a..cf78f7871b4 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution/execution_payload_header.rs @@ -1,14 +1,30 @@ -use crate::{test_utils::TestRandom, *}; -use derivative::Derivative; +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::{FixedVector, VariableList}; +use superstruct::superstruct; use 
test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256, Uint256}, + execution::{ + ExecutionBlockHash, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadRef, + Transactions, + }, + fork::ForkName, + map_execution_payload_ref_into_execution_payload_header, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Default, @@ -20,9 +36,9 @@ use tree_hash_derive::TreeHash; Decode, TreeHash, TestRandom, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -35,8 +51,14 @@ use tree_hash_derive::TreeHash; derive(PartialEq, TreeHash, Debug), tree_hash(enum_behaviour = "transparent") ), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_ref_into(ExecutionPayloadHeader) )] #[cfg_attr( @@ -44,8 +66,8 @@ use tree_hash_derive::TreeHash; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec", untagged)] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] @@ -84,12 
+106,12 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas), partial_getter(copy))] + #[superstruct(only(Capella, Deneb, Electra, Fulu), partial_getter(copy))] pub withdrawals_root: Hash256, - #[superstruct(only(Deneb, Electra, Fulu, Gloas), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub blob_gas_used: u64, - #[superstruct(only(Deneb, Electra, Fulu, Gloas), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, } @@ -115,14 +137,19 @@ impl ExecutionPayloadHeader { ExecutionPayloadHeaderElectra::from_ssz_bytes(bytes).map(Self::Electra) } ForkName::Fulu => ExecutionPayloadHeaderFulu::from_ssz_bytes(bytes).map(Self::Fulu), - ForkName::Gloas => ExecutionPayloadHeaderGloas::from_ssz_bytes(bytes).map(Self::Gloas), + ForkName::Gloas => Err(ssz::DecodeError::BytesInvalid(format!( + "unsupported fork for ExecutionPayloadHeader: {fork_name}", + ))), } } #[allow(clippy::arithmetic_side_effects)] pub fn ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { // TODO(newfork): Add a new case here if there are new variable fields - if fork_name.bellatrix_enabled() { + if fork_name.gloas_enabled() { + // TODO(EIP7732): check this + 0 + } else if fork_name.bellatrix_enabled() { // Max size of variable length `extra_data` field E::max_extra_data_bytes() * ::ssz_fixed_len() } else { @@ -137,7 +164,6 @@ impl ExecutionPayloadHeader { ExecutionPayloadHeader::Deneb(_) => ForkName::Deneb, ExecutionPayloadHeader::Electra(_) => ForkName::Electra, ExecutionPayloadHeader::Fulu(_) => ForkName::Fulu, - ExecutionPayloadHeader::Gloas(_) => ForkName::Gloas, } } } @@ -245,30 +271,6 @@ impl ExecutionPayloadHeaderElectra { } } -impl ExecutionPayloadHeaderFulu { - pub 
fn upgrade_to_gloas(&self) -> ExecutionPayloadHeaderGloas { - ExecutionPayloadHeaderGloas { - parent_hash: self.parent_hash, - fee_recipient: self.fee_recipient, - state_root: self.state_root, - receipts_root: self.receipts_root, - logs_bloom: self.logs_bloom.clone(), - prev_randao: self.prev_randao, - block_number: self.block_number, - gas_limit: self.gas_limit, - gas_used: self.gas_used, - timestamp: self.timestamp, - extra_data: self.extra_data.clone(), - base_fee_per_gas: self.base_fee_per_gas, - block_hash: self.block_hash, - transactions_root: self.transactions_root, - withdrawals_root: self.withdrawals_root, - blob_gas_used: self.blob_gas_used, - excess_blob_gas: self.excess_blob_gas, - } - } -} - impl<'a, E: EthSpec> From<&'a ExecutionPayloadBellatrix> for ExecutionPayloadHeaderBellatrix { fn from(payload: &'a ExecutionPayloadBellatrix) -> Self { Self { @@ -384,30 +386,6 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadFulu> for ExecutionPayloadHeade } } -impl<'a, E: EthSpec> From<&'a ExecutionPayloadGloas> for ExecutionPayloadHeaderGloas { - fn from(payload: &'a ExecutionPayloadGloas) -> Self { - Self { - parent_hash: payload.parent_hash, - fee_recipient: payload.fee_recipient, - state_root: payload.state_root, - receipts_root: payload.receipts_root, - logs_bloom: payload.logs_bloom.clone(), - prev_randao: payload.prev_randao, - block_number: payload.block_number, - gas_limit: payload.gas_limit, - gas_used: payload.gas_used, - timestamp: payload.timestamp, - extra_data: payload.extra_data.clone(), - base_fee_per_gas: payload.base_fee_per_gas, - block_hash: payload.block_hash, - transactions_root: payload.transactions.tree_hash_root(), - withdrawals_root: payload.withdrawals.tree_hash_root(), - blob_gas_used: payload.blob_gas_used, - excess_blob_gas: payload.excess_blob_gas, - } - } -} - // These impls are required to work around an inelegance in `to_execution_payload_header`. // They only clone headers so they should be relatively cheap. 
impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderBellatrix { @@ -440,12 +418,6 @@ impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderFulu { } } -impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderGloas { - fn from(payload: &'a Self) -> Self { - payload.clone() - } -} - impl<'a, E: EthSpec> From> for ExecutionPayloadHeader { fn from(payload: ExecutionPayloadRef<'a, E>) -> Self { map_execution_payload_ref_into_execution_payload_header!( @@ -507,9 +479,6 @@ impl ExecutionPayloadHeaderRefMut<'_, E> { ExecutionPayloadHeaderRefMut::Fulu(mut_ref) => { *mut_ref = header.try_into()?; } - ExecutionPayloadHeaderRefMut::Gloas(mut_ref) => { - *mut_ref = header.try_into()?; - } } Ok(()) } @@ -537,16 +506,6 @@ impl TryFrom> for ExecutionPayloadHeaderFu } } -impl TryFrom> for ExecutionPayloadHeaderGloas { - type Error = BeaconStateError; - fn try_from(header: ExecutionPayloadHeader) -> Result { - match header { - ExecutionPayloadHeader::Gloas(execution_payload_header) => Ok(execution_payload_header), - _ => Err(BeaconStateError::IncorrectStateVariant), - } - } -} - impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for ExecutionPayloadHeader { fn context_deserialize(deserializer: D, context: ForkName) -> Result where @@ -559,12 +518,6 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for ExecutionPayloadHead )) }; Ok(match context { - ForkName::Base | ForkName::Altair => { - return Err(serde::de::Error::custom(format!( - "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", - context - ))); - } ForkName::Bellatrix => { Self::Bellatrix(Deserialize::deserialize(deserializer).map_err(convert_err)?) } @@ -580,8 +533,12 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for ExecutionPayloadHead ForkName::Fulu => { Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) } - ForkName::Gloas => { - Self::Gloas(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ + ForkName::Base | ForkName::Altair | ForkName::Gloas => { + return Err(serde::de::Error::custom(format!( + "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", + context + ))); } }) } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution/execution_requests.rs similarity index 89% rename from consensus/types/src/execution_requests.rs rename to consensus/types/src/execution/execution_requests.rs index 592dda5d5e1..92d717778e3 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution/execution_requests.rs @@ -1,8 +1,6 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ConsolidationRequest, DepositRequest, EthSpec, ForkName, Hash256, WithdrawalRequest}; use alloy_primitives::Bytes; -use derivative::Derivative; +use context_deserialize::context_deserialize; +use educe::Educe; use ethereum_hashing::{DynamicContext, Sha256Context}; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -11,6 +9,15 @@ use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + consolidation::ConsolidationRequest, + core::{EthSpec, Hash256}, + deposit::DepositRequest, + fork::ForkName, + test_utils::TestRandom, + withdrawal::WithdrawalRequest, +}; + pub type DepositRequests = VariableList::MaxDepositRequestsPerPayload>; pub type WithdrawalRequests = @@ -24,10 +31,10 @@ pub type ConsolidationRequests = arbitrary(bound = "E: EthSpec") )] #[derive( - Debug, Derivative, Default, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, + Debug, Educe, Default, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, )] #[serde(bound = "E: EthSpec")] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[context_deserialize(ForkName)] pub struct ExecutionRequests { pub deposits: DepositRequests, diff --git a/consensus/types/src/execution/mod.rs 
b/consensus/types/src/execution/mod.rs new file mode 100644 index 00000000000..da6c8606002 --- /dev/null +++ b/consensus/types/src/execution/mod.rs @@ -0,0 +1,45 @@ +mod eth1_data; +mod execution_block_hash; +mod execution_block_header; +#[macro_use] +mod execution_payload; +mod bls_to_execution_change; +mod dumb_macros; +mod execution_payload_bid; +mod execution_payload_envelope; +mod execution_payload_header; +mod execution_requests; +mod payload; +mod signed_bls_to_execution_change; +mod signed_execution_payload_bid; +mod signed_execution_payload_envelope; + +pub use bls_to_execution_change::BlsToExecutionChange; +pub use eth1_data::Eth1Data; +pub use execution_block_hash::ExecutionBlockHash; +pub use execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; +pub use execution_payload::{ + ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadRef, + Transaction, Transactions, +}; +pub use execution_payload_bid::ExecutionPayloadBid; +pub use execution_payload_envelope::ExecutionPayloadEnvelope; +pub use execution_payload_header::{ + ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, +}; +pub use execution_requests::{ + ConsolidationRequests, DepositRequests, ExecutionRequests, RequestType, WithdrawalRequests, +}; +pub use payload::{ + AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadRef, + BlockProductionVersion, BlockType, ExecPayload, FullPayload, FullPayloadBellatrix, + FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, FullPayloadRef, + OwnedExecPayload, +}; +pub use 
signed_bls_to_execution_change::SignedBlsToExecutionChange; +pub use signed_execution_payload_bid::SignedExecutionPayloadBid; +pub use signed_execution_payload_envelope::SignedExecutionPayloadEnvelope; diff --git a/consensus/types/src/payload.rs b/consensus/types/src/execution/payload.rs similarity index 87% rename from consensus/types/src/payload.rs rename to consensus/types/src/execution/payload.rs index 28dc10f9384..703b082c182 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/execution/payload.rs @@ -1,16 +1,30 @@ -use crate::{test_utils::TestRandom, *}; -use derivative::Derivative; +use educe::Educe; use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::borrow::Cow; -use std::fmt::Debug; -use std::hash::Hash; +use ssz_types::VariableList; +use std::{borrow::Cow, fmt::Debug, hash::Hash}; +use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec, Hash256}, + execution::{ + ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, + ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadRef, Transactions, + }, + fork::ForkName, + map_execution_payload_into_blinded_payload, map_execution_payload_into_full_payload, + state::BeaconStateError, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum BlockType { Blinded, @@ -38,8 +52,8 @@ pub trait ExecPayload: Debug + Clone + PartialEq + Hash + TreeHash + fn gas_limit(&self) -> u64; fn transactions(&self) -> Option<&Transactions>; /// fork-specific fields - fn withdrawals_root(&self) -> Result; - fn blob_gas_used(&self) -> Result; + fn 
withdrawals_root(&self) -> Result; + fn blob_gas_used(&self) -> Result; /// Is this a default payload with 0x0 roots for transactions and withdrawals? fn is_default_with_zero_roots(&self) -> bool; @@ -105,7 +119,6 @@ pub trait AbstractExecPayload: + TryInto + TryInto + TryInto - + TryInto + Sync { type Ref<'a>: ExecPayload @@ -114,8 +127,7 @@ pub trait AbstractExecPayload: + From<&'a Self::Capella> + From<&'a Self::Deneb> + From<&'a Self::Electra> - + From<&'a Self::Fulu> - + From<&'a Self::Gloas>; + + From<&'a Self::Fulu>; type Bellatrix: OwnedExecPayload + Into @@ -142,15 +154,10 @@ pub trait AbstractExecPayload: + for<'a> From>> + TryFrom> + Sync; - type Gloas: OwnedExecPayload - + Into - + for<'a> From>> - + TryFrom> - + Sync; } #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -161,9 +168,9 @@ pub trait AbstractExecPayload: Decode, TestRandom, TreeHash, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -173,22 +180,28 @@ pub trait AbstractExecPayload: ssz(struct_behaviour = "transparent"), ), ref_attributes( - derive(Debug, Derivative, TreeHash), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + derive(Debug, Educe, TreeHash), + educe(PartialEq, Hash(bound(E: EthSpec))), tree_hash(enum_behaviour = "transparent"), ), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", 
derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec")] #[tree_hash(enum_behaviour = "transparent")] pub struct FullPayload { @@ -205,8 +218,6 @@ pub struct FullPayload { pub execution_payload: ExecutionPayloadElectra, #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] pub execution_payload: ExecutionPayloadFulu, - #[superstruct(only(Gloas), partial_getter(rename = "execution_payload_gloas"))] - pub execution_payload: ExecutionPayloadGloas, } impl From> for ExecutionPayload { @@ -311,26 +322,24 @@ impl ExecPayload for FullPayload { }) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - FullPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), + FullPayload::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), FullPayload::Capella(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Deneb(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Electra(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), FullPayload::Fulu(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), - FullPayload::Gloas(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { FullPayload::Bellatrix(_) | FullPayload::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } FullPayload::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayload::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayload::Fulu(inner) => Ok(inner.execution_payload.blob_gas_used), - 
FullPayload::Gloas(inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -354,15 +363,15 @@ impl FullPayload { }) } - pub fn default_at_fork(fork_name: ForkName) -> Result { + pub fn default_at_fork(fork_name: ForkName) -> Result { match fork_name { - ForkName::Base | ForkName::Altair => Err(Error::IncorrectStateVariant), + ForkName::Base | ForkName::Altair => Err(BeaconStateError::IncorrectStateVariant), ForkName::Bellatrix => Ok(FullPayloadBellatrix::default().into()), ForkName::Capella => Ok(FullPayloadCapella::default().into()), ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), ForkName::Electra => Ok(FullPayloadElectra::default().into()), ForkName::Fulu => Ok(FullPayloadFulu::default().into()), - ForkName::Gloas => Ok(FullPayloadGloas::default().into()), + ForkName::Gloas => Err(BeaconStateError::IncorrectStateVariant), } } } @@ -450,9 +459,9 @@ impl ExecPayload for FullPayloadRef<'_, E> { }) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - FullPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), + FullPayloadRef::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), FullPayloadRef::Capella(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } @@ -463,21 +472,17 @@ impl ExecPayload for FullPayloadRef<'_, E> { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } FullPayloadRef::Fulu(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), - FullPayloadRef::Gloas(inner) => { - Ok(inner.execution_payload.withdrawals.tree_hash_root()) - } } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { FullPayloadRef::Bellatrix(_) | FullPayloadRef::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } FullPayloadRef::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayloadRef::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), 
FullPayloadRef::Fulu(inner) => Ok(inner.execution_payload.blob_gas_used), - FullPayloadRef::Gloas(inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -501,7 +506,6 @@ impl AbstractExecPayload for FullPayload { type Deneb = FullPayloadDeneb; type Electra = FullPayloadElectra; type Fulu = FullPayloadFulu; - type Gloas = FullPayloadGloas; } impl From> for FullPayload { @@ -520,7 +524,7 @@ impl TryFrom> for FullPayload { } #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -531,9 +535,9 @@ impl TryFrom> for FullPayload { Decode, TestRandom, TreeHash, - Derivative, + Educe, ), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -543,21 +547,27 @@ impl TryFrom> for FullPayload { ssz(struct_behaviour = "transparent"), ), ref_attributes( - derive(Debug, Derivative, TreeHash), - derivative(PartialEq, Hash(bound = "E: EthSpec")), + derive(Debug, Educe, TreeHash), + educe(PartialEq, Hash(bound(E: EthSpec))), tree_hash(enum_behaviour = "transparent"), ), map_into(ExecutionPayloadHeader), - cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ) )] #[cfg_attr( feature = "arbitrary", derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Derivative)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec")] 
#[tree_hash(enum_behaviour = "transparent")] pub struct BlindedPayload { @@ -574,8 +584,6 @@ pub struct BlindedPayload { pub execution_payload_header: ExecutionPayloadHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] pub execution_payload_header: ExecutionPayloadHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "execution_payload_gloas"))] - pub execution_payload_header: ExecutionPayloadHeaderGloas, } impl<'a, E: EthSpec> From> for BlindedPayload { @@ -658,26 +666,24 @@ impl ExecPayload for BlindedPayload { None } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { match self { - BlindedPayload::Bellatrix(_) => Err(Error::IncorrectStateVariant), + BlindedPayload::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), BlindedPayload::Capella(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.withdrawals_root), BlindedPayload::Fulu(inner) => Ok(inner.execution_payload_header.withdrawals_root), - BlindedPayload::Gloas(inner) => Ok(inner.execution_payload_header.withdrawals_root), } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { BlindedPayload::Bellatrix(_) | BlindedPayload::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } BlindedPayload::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayload::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayload::Fulu(inner) => Ok(inner.execution_payload_header.blob_gas_used), - BlindedPayload::Gloas(inner) => Ok(inner.execution_payload_header.blob_gas_used), } } @@ -766,9 +772,9 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { None } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result 
{ match self { - BlindedPayloadRef::Bellatrix(_) => Err(Error::IncorrectStateVariant), + BlindedPayloadRef::Bellatrix(_) => Err(BeaconStateError::IncorrectStateVariant), BlindedPayloadRef::Capella(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } @@ -777,19 +783,17 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { Ok(inner.execution_payload_header.withdrawals_root) } BlindedPayloadRef::Fulu(inner) => Ok(inner.execution_payload_header.withdrawals_root), - BlindedPayloadRef::Gloas(inner) => Ok(inner.execution_payload_header.withdrawals_root), } } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { match self { BlindedPayloadRef::Bellatrix(_) | BlindedPayloadRef::Capella(_) => { - Err(Error::IncorrectStateVariant) + Err(BeaconStateError::IncorrectStateVariant) } BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayloadRef::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayloadRef::Fulu(inner) => Ok(inner.execution_payload_header.blob_gas_used), - BlindedPayloadRef::Gloas(inner) => Ok(inner.execution_payload_header.blob_gas_used), } } @@ -877,12 +881,12 @@ macro_rules! impl_exec_payload_common { f(self) } - fn withdrawals_root(&self) -> Result { + fn withdrawals_root(&self) -> Result { let g = $g; g(self) } - fn blob_gas_used(&self) -> Result { + fn blob_gas_used(&self) -> Result { let h = $h; h(self) } @@ -917,15 +921,16 @@ macro_rules! 
impl_exec_payload_for_fork { }, { |_| { None } }, { - let c: for<'a> fn(&'a $wrapper_type_header) -> Result = - |payload: &$wrapper_type_header| { - let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); - wrapper_ref_type.withdrawals_root() - }; + let c: for<'a> fn( + &'a $wrapper_type_header, + ) -> Result = |payload: &$wrapper_type_header| { + let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); + wrapper_ref_type.withdrawals_root() + }; c }, { - let c: for<'a> fn(&'a $wrapper_type_header) -> Result = + let c: for<'a> fn(&'a $wrapper_type_header) -> Result = |payload: &$wrapper_type_header| { let wrapper_ref_type = BlindedPayloadRef::$fork_variant(&payload); wrapper_ref_type.blob_gas_used() @@ -935,12 +940,12 @@ macro_rules! impl_exec_payload_for_fork { ); impl TryInto<$wrapper_type_header> for BlindedPayload { - type Error = Error; + type Error = BeaconStateError; fn try_into(self) -> Result<$wrapper_type_header, Self::Error> { match self { BlindedPayload::$fork_variant(payload) => Ok(payload), - _ => Err(Error::IncorrectStateVariant), + _ => Err(BeaconStateError::IncorrectStateVariant), } } } @@ -963,13 +968,13 @@ macro_rules! impl_exec_payload_for_fork { } impl TryFrom> for $wrapper_type_header { - type Error = Error; + type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { match header { ExecutionPayloadHeader::$fork_variant(execution_payload_header) => { Ok(execution_payload_header.into()) } - _ => Err(Error::PayloadConversionLogicFlaw), + _ => Err(BeaconStateError::PayloadConversionLogicFlaw), } } } @@ -1004,7 +1009,7 @@ macro_rules! impl_exec_payload_for_fork { c }, { - let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); wrapper_ref_type.withdrawals_root() @@ -1012,7 +1017,7 @@ macro_rules! 
impl_exec_payload_for_fork { c }, { - let c: for<'a> fn(&'a $wrapper_type_full) -> Result = + let c: for<'a> fn(&'a $wrapper_type_full) -> Result = |payload: &$wrapper_type_full| { let wrapper_ref_type = FullPayloadRef::$fork_variant(&payload); wrapper_ref_type.blob_gas_used() @@ -1039,26 +1044,26 @@ macro_rules! impl_exec_payload_for_fork { } impl TryFrom> for $wrapper_type_full { - type Error = Error; + type Error = BeaconStateError; fn try_from(_: ExecutionPayloadHeader) -> Result { - Err(Error::PayloadConversionLogicFlaw) + Err(BeaconStateError::PayloadConversionLogicFlaw) } } impl TryFrom<$wrapped_type_header> for $wrapper_type_full { - type Error = Error; + type Error = BeaconStateError; fn try_from(_: $wrapped_type_header) -> Result { - Err(Error::PayloadConversionLogicFlaw) + Err(BeaconStateError::PayloadConversionLogicFlaw) } } impl TryInto<$wrapper_type_full> for FullPayload { - type Error = Error; + type Error = BeaconStateError; fn try_into(self) -> Result<$wrapper_type_full, Self::Error> { match self { FullPayload::$fork_variant(payload) => Ok(payload), - _ => Err(Error::PayloadConversionLogicFlaw), + _ => Err(BeaconStateError::PayloadConversionLogicFlaw), } } } @@ -1100,13 +1105,6 @@ impl_exec_payload_for_fork!( ExecutionPayloadFulu, Fulu ); -impl_exec_payload_for_fork!( - BlindedPayloadGloas, - FullPayloadGloas, - ExecutionPayloadHeaderGloas, - ExecutionPayloadGloas, - Gloas -); impl AbstractExecPayload for BlindedPayload { type Ref<'a> = BlindedPayloadRef<'a, E>; @@ -1115,7 +1113,6 @@ impl AbstractExecPayload for BlindedPayload { type Deneb = BlindedPayloadDeneb; type Electra = BlindedPayloadElectra; type Fulu = BlindedPayloadFulu; - type Gloas = BlindedPayloadGloas; } impl From> for BlindedPayload { @@ -1157,11 +1154,6 @@ impl From> for BlindedPayload { execution_payload_header, }) } - ExecutionPayloadHeader::Gloas(execution_payload_header) => { - Self::Gloas(BlindedPayloadGloas { - execution_payload_header, - }) - } } } } @@ -1184,9 +1176,6 @@ 
impl From> for ExecutionPayloadHeader { BlindedPayload::Fulu(blinded_payload) => { ExecutionPayloadHeader::Fulu(blinded_payload.execution_payload_header) } - BlindedPayload::Gloas(blinded_payload) => { - ExecutionPayloadHeader::Gloas(blinded_payload.execution_payload_header) - } } } } diff --git a/consensus/types/src/signed_bls_to_execution_change.rs b/consensus/types/src/execution/signed_bls_to_execution_change.rs similarity index 78% rename from consensus/types/src/signed_bls_to_execution_change.rs rename to consensus/types/src/execution/signed_bls_to_execution_change.rs index 910c4c7d7ef..535960fb3f9 100644 --- a/consensus/types/src/signed_bls_to_execution_change.rs +++ b/consensus/types/src/execution/signed_bls_to_execution_change.rs @@ -1,10 +1,12 @@ -use crate::test_utils::TestRandom; -use crate::*; +use bls::Signature; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{execution::BlsToExecutionChange, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/execution/signed_execution_payload_bid.rs b/consensus/types/src/execution/signed_execution_payload_bid.rs new file mode 100644 index 00000000000..29dfd03ba03 --- /dev/null +++ b/consensus/types/src/execution/signed_execution_payload_bid.rs @@ -0,0 +1,35 @@ +use crate::test_utils::TestRandom; +use crate::{ExecutionPayloadBid, ForkName}; +use bls::Signature; +use context_deserialize::context_deserialize; +use educe::Educe; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive(TestRandom, TreeHash, Debug, Clone, Encode, Decode, Serialize, Deserialize, Educe)] 
+#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[educe(PartialEq, Hash)] +#[context_deserialize(ForkName)] +// https://github.com/ethereum/consensus-specs/blob/master/specs/gloas/beacon-chain.md#signedexecutionpayloadbid +pub struct SignedExecutionPayloadBid { + pub message: ExecutionPayloadBid, + pub signature: Signature, +} + +impl SignedExecutionPayloadBid { + pub fn empty() -> Self { + Self { + message: ExecutionPayloadBid::default(), + signature: Signature::empty(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + ssz_and_tree_hash_tests!(SignedExecutionPayloadBid); +} diff --git a/consensus/types/src/execution/signed_execution_payload_envelope.rs b/consensus/types/src/execution/signed_execution_payload_envelope.rs new file mode 100644 index 00000000000..16410416157 --- /dev/null +++ b/consensus/types/src/execution/signed_execution_payload_envelope.rs @@ -0,0 +1,24 @@ +use crate::test_utils::TestRandom; +use crate::{EthSpec, ExecutionPayloadEnvelope}; +use bls::Signature; +use educe::Educe; +use serde::{Deserialize, Serialize}; +use ssz_derive::{Decode, Encode}; +use test_random_derive::TestRandom; +use tree_hash_derive::TreeHash; + +#[derive(Debug, Clone, Serialize, Encode, Decode, Deserialize, TestRandom, TreeHash, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] +#[serde(bound = "E: EthSpec")] +pub struct SignedExecutionPayloadEnvelope { + pub message: ExecutionPayloadEnvelope, + pub signature: Signature, +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::MainnetEthSpec; + + ssz_and_tree_hash_tests!(SignedExecutionPayloadEnvelope); +} diff --git a/consensus/types/src/exit/mod.rs b/consensus/types/src/exit/mod.rs new file mode 100644 index 00000000000..cb066d1d7a0 --- /dev/null +++ b/consensus/types/src/exit/mod.rs @@ -0,0 +1,5 @@ +mod signed_voluntary_exit; +mod voluntary_exit; + +pub use signed_voluntary_exit::SignedVoluntaryExit; +pub use voluntary_exit::VoluntaryExit; diff --git 
a/consensus/types/src/signed_voluntary_exit.rs b/consensus/types/src/exit/signed_voluntary_exit.rs similarity index 84% rename from consensus/types/src/signed_voluntary_exit.rs rename to consensus/types/src/exit/signed_voluntary_exit.rs index 0beffa1e04a..b49401a7215 100644 --- a/consensus/types/src/signed_voluntary_exit.rs +++ b/consensus/types/src/exit/signed_voluntary_exit.rs @@ -1,12 +1,12 @@ -use crate::context_deserialize; -use crate::{ForkName, VoluntaryExit, test_utils::TestRandom}; use bls::Signature; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{exit::VoluntaryExit, fork::ForkName, test_utils::TestRandom}; + /// An exit voluntarily submitted a validator who wishes to withdraw. /// /// Spec v0.12.1 diff --git a/consensus/types/src/voluntary_exit.rs b/consensus/types/src/exit/voluntary_exit.rs similarity index 90% rename from consensus/types/src/voluntary_exit.rs rename to consensus/types/src/exit/voluntary_exit.rs index 42d792a814d..30c6a97c4d1 100644 --- a/consensus/types/src/voluntary_exit.rs +++ b/consensus/types/src/exit/voluntary_exit.rs @@ -1,14 +1,17 @@ -use crate::context_deserialize; -use crate::{ - ChainSpec, Domain, Epoch, ForkName, Hash256, SecretKey, SignedRoot, SignedVoluntaryExit, - test_utils::TestRandom, -}; - +use bls::SecretKey; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, Epoch, Hash256, SignedRoot}, + exit::SignedVoluntaryExit, + fork::ForkName, + test_utils::TestRandom, +}; + /// An exit voluntarily submitted a validator who wishes to withdraw. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/fork.rs b/consensus/types/src/fork/fork.rs similarity index 96% rename from consensus/types/src/fork.rs rename to consensus/types/src/fork/fork.rs index 5c5bd7ffd18..371b11e05c5 100644 --- a/consensus/types/src/fork.rs +++ b/consensus/types/src/fork/fork.rs @@ -1,12 +1,11 @@ -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName}; use context_deserialize::context_deserialize; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, fork::ForkName, test_utils::TestRandom}; + /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. /// /// Spec v0.12.1 diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork/fork_context.rs similarity index 96% rename from consensus/types/src/fork_context.rs rename to consensus/types/src/fork/fork_context.rs index 66617326e13..3407689e790 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork/fork_context.rs @@ -1,7 +1,11 @@ +use std::collections::BTreeMap; + use parking_lot::RwLock; -use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot}; -use std::collections::BTreeMap; +use crate::{ + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, +}; /// Represents a hard fork in the consensus protocol. 
/// @@ -59,8 +63,7 @@ impl ForkContext { let current_epoch = current_slot.epoch(E::slots_per_epoch()); let current_fork = epoch_to_forks .values() - .filter(|&fork| fork.fork_epoch <= current_epoch) - .next_back() + .rfind(|&fork| fork.fork_epoch <= current_epoch) .cloned() .expect("should match at least genesis epoch"); @@ -152,8 +155,7 @@ impl ForkContext { #[cfg(test)] mod tests { use super::*; - use crate::MainnetEthSpec; - use crate::chain_spec::{BlobParameters, BlobSchedule}; + use crate::core::{BlobParameters, BlobSchedule, MainnetEthSpec}; type E = MainnetEthSpec; @@ -180,6 +182,7 @@ mod tests { spec.deneb_fork_epoch = Some(Epoch::new(4)); spec.electra_fork_epoch = Some(Epoch::new(5)); spec.fulu_fork_epoch = Some(Epoch::new(6)); + spec.gloas_fork_epoch = Some(Epoch::new(7)); spec.blob_schedule = BlobSchedule::new(blob_parameters); spec } @@ -194,6 +197,7 @@ mod tests { assert!(context.fork_exists(ForkName::Electra)); assert!(context.fork_exists(ForkName::Fulu)); + assert!(context.fork_exists(ForkName::Gloas)); } #[test] diff --git a/consensus/types/src/fork_data.rs b/consensus/types/src/fork/fork_data.rs similarity index 88% rename from consensus/types/src/fork_data.rs rename to consensus/types/src/fork/fork_data.rs index 2d5e905efb9..1b9c8bad9ff 100644 --- a/consensus/types/src/fork_data.rs +++ b/consensus/types/src/fork/fork_data.rs @@ -1,12 +1,15 @@ -use crate::test_utils::TestRandom; -use crate::{ForkName, Hash256, SignedRoot}; use context_deserialize::context_deserialize; - use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Hash256, SignedRoot}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Specifies a fork of the `BeaconChain`, to prevent replay attacks. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/fork/fork_macros.rs b/consensus/types/src/fork/fork_macros.rs new file mode 100644 index 00000000000..0c7f382ffc5 --- /dev/null +++ b/consensus/types/src/fork/fork_macros.rs @@ -0,0 +1,60 @@ +/// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. +/// +/// The `$body` expression is where the magic happens. The macro allows us to achieve polymorphism +/// in the return type, which is not usually possible in Rust without trait objects. +/// +/// E.g. you could call `map_fork_name!(fork, BeaconBlock, serde_json::from_str(s))` to decode +/// different `BeaconBlock` variants depending on the value of `fork`. Note how the type of the body +/// will change between `BeaconBlockBase` and `BeaconBlockAltair` depending on which branch is +/// taken, the important thing is that they are re-unified by injecting them back into the +/// `BeaconBlock` parent enum. +/// +/// If you would also like to extract additional data alongside the superstruct type, use +/// the more flexible `map_fork_name_with` macro. +#[macro_export] +macro_rules! map_fork_name { + ($fork_name:expr, $t:tt, $body:expr) => { + $crate::map_fork_name_with!($fork_name, $t, { ($body, ()) }).0 + }; +} + +/// Map a fork name into a tuple of `(t, extra)` where `t` is a superstruct type. +#[macro_export] +macro_rules! 
map_fork_name_with { + ($fork_name:expr, $t:tt, $body:block) => { + match $fork_name { + $crate::fork::ForkName::Base => { + let (value, extra_data) = $body; + ($t::Base(value), extra_data) + } + $crate::fork::ForkName::Altair => { + let (value, extra_data) = $body; + ($t::Altair(value), extra_data) + } + $crate::fork::ForkName::Bellatrix => { + let (value, extra_data) = $body; + ($t::Bellatrix(value), extra_data) + } + $crate::fork::ForkName::Capella => { + let (value, extra_data) = $body; + ($t::Capella(value), extra_data) + } + $crate::fork::ForkName::Deneb => { + let (value, extra_data) = $body; + ($t::Deneb(value), extra_data) + } + $crate::fork::ForkName::Electra => { + let (value, extra_data) = $body; + ($t::Electra(value), extra_data) + } + $crate::fork::ForkName::Fulu => { + let (value, extra_data) = $body; + ($t::Fulu(value), extra_data) + } + $crate::fork::ForkName::Gloas => { + let (value, extra_data) = $body; + ($t::Gloas(value), extra_data) + } + } + }; +} diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork/fork_name.rs similarity index 84% rename from consensus/types/src/fork_name.rs rename to consensus/types/src/fork/fork_name.rs index 338e2b1e759..e9ec5fbe41e 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork/fork_name.rs @@ -1,8 +1,12 @@ -use crate::{ChainSpec, Epoch}; +use std::{ + fmt::{self, Display, Formatter}, + str::FromStr, +}; + use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::fmt::{self, Display, Formatter}; -use std::str::FromStr; + +use crate::core::{ChainSpec, Epoch}; #[derive( Debug, Clone, Copy, Decode, Encode, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, @@ -144,7 +148,7 @@ impl ForkName { /// Return the name of the fork immediately prior to the current one. /// - /// If `self` is `ForkName::Base` then `Base` is returned. + /// If `self` is `ForkName::Base` then `None` is returned. 
pub fn previous_fork(self) -> Option { match self { ForkName::Base => None, @@ -243,67 +247,6 @@ impl ForkName { } } -/// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. -/// -/// The `$body` expression is where the magic happens. The macro allows us to achieve polymorphism -/// in the return type, which is not usually possible in Rust without trait objects. -/// -/// E.g. you could call `map_fork_name!(fork, BeaconBlock, serde_json::from_str(s))` to decode -/// different `BeaconBlock` variants depending on the value of `fork`. Note how the type of the body -/// will change between `BeaconBlockBase` and `BeaconBlockAltair` depending on which branch is -/// taken, the important thing is that they are re-unified by injecting them back into the -/// `BeaconBlock` parent enum. -/// -/// If you would also like to extract additional data alongside the superstruct type, use -/// the more flexible `map_fork_name_with` macro. -#[macro_export] -macro_rules! map_fork_name { - ($fork_name:expr, $t:tt, $body:expr) => { - map_fork_name_with!($fork_name, $t, { ($body, ()) }).0 - }; -} - -/// Map a fork name into a tuple of `(t, extra)` where `t` is a superstruct type. -#[macro_export] -macro_rules! 
map_fork_name_with { - ($fork_name:expr, $t:tt, $body:block) => { - match $fork_name { - ForkName::Base => { - let (value, extra_data) = $body; - ($t::Base(value), extra_data) - } - ForkName::Altair => { - let (value, extra_data) = $body; - ($t::Altair(value), extra_data) - } - ForkName::Bellatrix => { - let (value, extra_data) = $body; - ($t::Bellatrix(value), extra_data) - } - ForkName::Capella => { - let (value, extra_data) = $body; - ($t::Capella(value), extra_data) - } - ForkName::Deneb => { - let (value, extra_data) = $body; - ($t::Deneb(value), extra_data) - } - ForkName::Electra => { - let (value, extra_data) = $body; - ($t::Electra(value), extra_data) - } - ForkName::Fulu => { - let (value, extra_data) = $body; - ($t::Fulu(value), extra_data) - } - ForkName::Gloas => { - let (value, extra_data) = $body; - ($t::Gloas(value), extra_data) - } - } - }; -} - impl FromStr for ForkName { type Err = String; diff --git a/consensus/types/src/fork/fork_version_decode.rs b/consensus/types/src/fork/fork_version_decode.rs new file mode 100644 index 00000000000..4349efb21f9 --- /dev/null +++ b/consensus/types/src/fork/fork_version_decode.rs @@ -0,0 +1,6 @@ +use crate::fork::ForkName; + +pub trait ForkVersionDecode: Sized { + /// SSZ decode with explicit fork variant. 
+ fn from_ssz_bytes_by_fork(bytes: &[u8], fork_name: ForkName) -> Result; +} diff --git a/consensus/types/src/fork/mod.rs b/consensus/types/src/fork/mod.rs new file mode 100644 index 00000000000..1ad1c7cb622 --- /dev/null +++ b/consensus/types/src/fork/mod.rs @@ -0,0 +1,15 @@ +mod fork; +mod fork_context; +mod fork_data; +mod fork_macros; +mod fork_name; +mod fork_version_decode; + +pub use crate::{map_fork_name, map_fork_name_with}; +pub use fork::Fork; +pub use fork_context::{ForkContext, HardFork}; +pub use fork_data::ForkData; +pub use fork_name::{ForkName, InconsistentFork}; +pub use fork_version_decode::ForkVersionDecode; + +pub type ForkVersion = [u8; 4]; diff --git a/consensus/types/src/kzg_ext/consts.rs b/consensus/types/src/kzg_ext/consts.rs new file mode 100644 index 00000000000..06c9f9c749e --- /dev/null +++ b/consensus/types/src/kzg_ext/consts.rs @@ -0,0 +1,3 @@ +pub use kzg::{ + BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, VERSIONED_HASH_VERSION_KZG, +}; diff --git a/consensus/types/src/kzg_ext/mod.rs b/consensus/types/src/kzg_ext/mod.rs new file mode 100644 index 00000000000..63533ec71f5 --- /dev/null +++ b/consensus/types/src/kzg_ext/mod.rs @@ -0,0 +1,27 @@ +pub mod consts; + +pub use kzg::{Blob as KzgBlob, Error as KzgError, Kzg, KzgCommitment, KzgProof}; + +use ssz_types::VariableList; + +use crate::core::EthSpec; + +// Note on List limit: +// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` +// - Fulu: `MaxCellsPerBlock` +// We choose to use a single type (with the larger value from Fulu as `N`) instead of having to +// introduce a new type for Fulu. This is to avoid messy conversions and having to add extra types +// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, +// which we don't current do on `KzgProofs` anyway. +pub type KzgProofs = VariableList::MaxCellsPerBlock>; + +pub type KzgCommitments = + VariableList::MaxBlobCommitmentsPerBlock>; + +/// Util method helpful for logging. 
+pub fn format_kzg_commitments(commitments: &[KzgCommitment]) -> String { + let commitment_strings: Vec = commitments.iter().map(|x| x.to_string()).collect(); + let commitments_joined = commitment_strings.join(", "); + let surrounded_commitments = format!("[{}]", commitments_joined); + surrounded_commitments +} diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 8e83fed1d9a..5a89fcb1d48 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -1,4 +1,4 @@ -//! Ethereum 2.0 types +//! Ethereum Consensus types // Clippy lint set up #![cfg_attr( not(test), @@ -12,287 +12,166 @@ #[macro_use] pub mod test_utils; -pub mod aggregate_and_proof; -pub mod application_domain; pub mod attestation; -pub mod attestation_data; -pub mod attestation_duty; -pub mod attester_slashing; -pub mod beacon_block; -pub mod beacon_block_body; -pub mod beacon_block_header; -pub mod beacon_committee; -pub mod beacon_response; -pub mod beacon_state; -pub mod bls_to_execution_change; -pub mod builder_bid; -pub mod chain_spec; -pub mod checkpoint; -pub mod consolidation_request; -pub mod consts; -pub mod contribution_and_proof; +pub mod block; +pub mod builder; +pub mod consolidation; +pub mod core; +pub mod data; pub mod deposit; -pub mod deposit_data; -pub mod deposit_message; -pub mod deposit_request; -pub mod deposit_tree_snapshot; -pub mod enr_fork_id; -pub mod eth1_data; -pub mod eth_spec; -pub mod execution_block_hash; -pub mod execution_payload; -pub mod execution_payload_header; +pub mod execution; +pub mod exit; pub mod fork; -pub mod fork_data; -pub mod fork_name; -pub mod graffiti; -pub mod historical_batch; -pub mod historical_summary; -pub mod indexed_attestation; -pub mod light_client_bootstrap; -pub mod light_client_finality_update; -pub mod light_client_optimistic_update; -pub mod light_client_update; -pub mod pending_attestation; -pub mod pending_consolidation; -pub mod pending_deposit; -pub mod pending_partial_withdrawal; -pub mod 
proposer_preparation_data; -pub mod proposer_slashing; -pub mod relative_epoch; -pub mod selection_proof; -pub mod shuffling_id; -pub mod signed_aggregate_and_proof; -pub mod signed_beacon_block; -pub mod signed_beacon_block_header; -pub mod signed_bls_to_execution_change; -pub mod signed_contribution_and_proof; -pub mod signed_voluntary_exit; -pub mod signing_data; -pub mod sync_committee_subscription; -pub mod sync_duty; -pub mod validator; -pub mod validator_subscription; -pub mod voluntary_exit; -pub mod withdrawal_credentials; -pub mod withdrawal_request; -#[macro_use] -pub mod slot_epoch_macros; -pub mod activation_queue; -pub mod config_and_preset; -pub mod execution_block_header; -pub mod execution_requests; -pub mod fork_context; -pub mod participation_flags; -pub mod payload; -pub mod preset; -pub mod slot_epoch; -pub mod subnet_id; -pub mod sync_aggregate; -pub mod sync_aggregator_selection_data; +pub mod kzg_ext; +pub mod light_client; +pub mod slashing; +pub mod state; pub mod sync_committee; -pub mod sync_committee_contribution; -pub mod sync_committee_message; -pub mod sync_selection_proof; -pub mod sync_subnet_id; -pub mod validator_registration_data; +pub mod validator; pub mod withdrawal; -pub mod epoch_cache; -pub mod slot_data; -#[cfg(feature = "sqlite")] -pub mod sqlite; +// Temporary root level exports to maintain backwards compatibility for Lighthouse. +pub use attestation::*; +pub use block::*; +pub use builder::*; +pub use consolidation::*; +pub use core::{consts, *}; +pub use data::*; +pub use deposit::*; +pub use execution::*; +pub use exit::*; +pub use fork::*; +pub use kzg_ext::*; +pub use light_client::*; +pub use slashing::*; +pub use state::*; +pub use sync_committee::*; +pub use validator::*; +pub use withdrawal::*; + +// Temporary facade modules to maintain backwards compatibility for Lighthouse. 
+pub mod eth_spec { + pub use crate::core::EthSpec; +} + +pub mod chain_spec { + pub use crate::core::ChainSpec; +} + +pub mod beacon_block { + pub use crate::block::{BlindedBeaconBlock, BlockImportSource}; +} + +pub mod beacon_block_body { + pub use crate::kzg_ext::{KzgCommitments, format_kzg_commitments}; +} + +pub mod beacon_state { + pub use crate::state::{ + BeaconState, BeaconStateBase, CommitteeCache, compute_committee_index_in_epoch, + compute_committee_range_in_epoch, epoch_committee_count, + }; +} + +pub mod graffiti { + pub use crate::core::GraffitiString; +} + +pub mod indexed_attestation { + pub use crate::attestation::{IndexedAttestationBase, IndexedAttestationElectra}; +} + +pub mod historical_summary { + pub use crate::state::HistoricalSummary; +} + +pub mod participation_flags { + pub use crate::attestation::ParticipationFlags; +} + +pub mod epoch_cache { + pub use crate::state::{EpochCache, EpochCacheError, EpochCacheKey}; +} + +pub mod non_zero_usize { + pub use crate::core::new_non_zero_usize; +} + +pub mod data_column_sidecar { + pub use crate::data::{ + Cell, ColumnIndex, DataColumn, DataColumnSidecar, DataColumnSidecarError, + DataColumnSidecarList, + }; +} + +pub mod builder_bid { + pub use crate::builder::*; +} + +pub mod blob_sidecar { + pub use crate::data::{ + BlobIdentifier, BlobSidecar, BlobSidecarError, BlobsList, FixedBlobSidecarList, + }; +} + +pub mod payload { + pub use crate::execution::BlockProductionVersion; +} + +pub mod execution_requests { + pub use crate::execution::{ + ConsolidationRequests, DepositRequests, ExecutionRequests, RequestType, WithdrawalRequests, + }; +} + +pub mod execution_payload_envelope { + pub use crate::execution::{ExecutionPayloadEnvelope, SignedExecutionPayloadEnvelope}; +} + +pub mod data_column_custody_group { + pub use crate::data::{ + CustodyIndex, compute_columns_for_custody_group, compute_ordered_custody_column_indices, + compute_subnets_for_node, compute_subnets_from_custody_group, 
get_custody_groups, + }; +} + +pub mod sync_aggregate { + pub use crate::sync_committee::SyncAggregateError as Error; +} + +pub mod light_client_update { + pub use crate::light_client::consts::{ + CURRENT_SYNC_COMMITTEE_INDEX, CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA, FINALIZED_ROOT_INDEX, + FINALIZED_ROOT_INDEX_ELECTRA, MAX_REQUEST_LIGHT_CLIENT_UPDATES, NEXT_SYNC_COMMITTEE_INDEX, + NEXT_SYNC_COMMITTEE_INDEX_ELECTRA, + }; +} + +pub mod sync_committee_contribution { + pub use crate::sync_committee::{ + SyncCommitteeContributionError as Error, SyncContributionData, + }; +} + +pub mod slot_data { + pub use crate::core::SlotData; +} -pub mod blob_sidecar; -pub mod data_column_custody_group; -pub mod data_column_sidecar; -pub mod data_column_subnet_id; -pub mod light_client_header; -pub mod non_zero_usize; -pub mod runtime_fixed_vector; -pub mod runtime_var_list; +pub mod signed_aggregate_and_proof { + pub use crate::attestation::SignedAggregateAndProofRefMut; +} -pub use crate::activation_queue::ActivationQueue; -pub use crate::aggregate_and_proof::{ - AggregateAndProof, AggregateAndProofBase, AggregateAndProofElectra, AggregateAndProofRef, -}; -pub use crate::attestation::{ - Attestation, AttestationBase, AttestationElectra, AttestationRef, AttestationRefMut, - Error as AttestationError, SingleAttestation, -}; -pub use crate::attestation_data::AttestationData; -pub use crate::attestation_duty::AttestationDuty; -pub use crate::attester_slashing::{ - AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, - AttesterSlashingRef, AttesterSlashingRefOnDisk, -}; -pub use crate::beacon_block::{ - BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, - BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockGloas, BeaconBlockRef, - BeaconBlockRefMut, BlindedBeaconBlock, BlockImportSource, EmptyBlock, -}; -pub use crate::beacon_block_body::{ - BeaconBlockBody, BeaconBlockBodyAltair, 
BeaconBlockBodyBase, BeaconBlockBodyBellatrix, - BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, - BeaconBlockBodyGloas, BeaconBlockBodyRef, BeaconBlockBodyRefMut, -}; -pub use crate::beacon_block_header::BeaconBlockHeader; -pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; -pub use crate::beacon_response::{ - BeaconResponse, ForkVersionDecode, ForkVersionedResponse, UnversionedResponse, -}; -pub use crate::beacon_state::{Error as BeaconStateError, *}; -pub use crate::blob_sidecar::{BlobIdentifier, BlobSidecar, BlobSidecarList, BlobsList}; -pub use crate::bls_to_execution_change::BlsToExecutionChange; -pub use crate::chain_spec::{ChainSpec, Config, Domain}; -pub use crate::checkpoint::Checkpoint; -pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, - ConfigAndPresetGloas, -}; -pub use crate::consolidation_request::ConsolidationRequest; -pub use crate::contribution_and_proof::ContributionAndProof; -pub use crate::data_column_sidecar::{ - ColumnIndex, DataColumnSidecar, DataColumnSidecarList, DataColumnsByRootIdentifier, -}; -pub use crate::data_column_subnet_id::DataColumnSubnetId; -pub use crate::deposit::{DEPOSIT_TREE_DEPTH, Deposit}; -pub use crate::deposit_data::DepositData; -pub use crate::deposit_message::DepositMessage; -pub use crate::deposit_request::DepositRequest; -pub use crate::deposit_tree_snapshot::{DepositTreeSnapshot, FinalizedExecutionBlock}; -pub use crate::enr_fork_id::EnrForkId; -pub use crate::epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; -pub use crate::eth_spec::EthSpecId; -pub use crate::eth1_data::Eth1Data; -pub use crate::execution_block_hash::ExecutionBlockHash; -pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; -pub use crate::execution_payload::{ - ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - 
ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadGloas, ExecutionPayloadRef, - Transaction, Transactions, Withdrawals, -}; -pub use crate::execution_payload_header::{ - ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, - ExecutionPayloadHeaderGloas, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, -}; -pub use crate::execution_requests::{ExecutionRequests, RequestType}; -pub use crate::fork::Fork; -pub use crate::fork_context::ForkContext; -pub use crate::fork_data::ForkData; -pub use crate::fork_name::{ForkName, InconsistentFork}; -pub use crate::graffiti::{GRAFFITI_BYTES_LEN, Graffiti}; -pub use crate::historical_batch::HistoricalBatch; -pub use crate::indexed_attestation::{ - IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, -}; -pub use crate::light_client_bootstrap::{ - LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, - LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, - LightClientBootstrapGloas, -}; -pub use crate::light_client_finality_update::{ - LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, - LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, - LightClientFinalityUpdateFulu, LightClientFinalityUpdateGloas, -}; -pub use crate::light_client_header::{ - LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, -}; -pub use crate::light_client_optimistic_update::{ - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, - LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, - LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, - LightClientOptimisticUpdateGloas, -}; -pub use 
crate::light_client_update::{ - Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, - LightClientUpdateFulu, LightClientUpdateGloas, MerkleProof, -}; -pub use crate::participation_flags::ParticipationFlags; -pub use crate::payload::{ - AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, - BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadGloas, - BlindedPayloadRef, BlockType, ExecPayload, FullPayload, FullPayloadBellatrix, - FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, FullPayloadFulu, FullPayloadGloas, - FullPayloadRef, OwnedExecPayload, -}; -pub use crate::pending_attestation::PendingAttestation; -pub use crate::pending_consolidation::PendingConsolidation; -pub use crate::pending_deposit::PendingDeposit; -pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; -pub use crate::preset::{ - AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, - FuluPreset, GloasPreset, -}; -pub use crate::proposer_preparation_data::ProposerPreparationData; -pub use crate::proposer_slashing::ProposerSlashing; -pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; -pub use crate::runtime_fixed_vector::RuntimeFixedVector; -pub use crate::runtime_var_list::RuntimeVariableList; -pub use crate::selection_proof::SelectionProof; -pub use crate::shuffling_id::AttestationShufflingId; -pub use crate::signed_aggregate_and_proof::{ - SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, -}; -pub use crate::signed_beacon_block::{ - SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockFulu, SignedBeaconBlockGloas, SignedBeaconBlockHash, SignedBlindedBeaconBlock, - 
ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, -}; -pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; -pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; -pub use crate::signed_contribution_and_proof::SignedContributionAndProof; -pub use crate::signed_voluntary_exit::SignedVoluntaryExit; -pub use crate::signing_data::{SignedRoot, SigningData}; -pub use crate::slot_epoch::{Epoch, Slot}; -pub use crate::subnet_id::SubnetId; -pub use crate::sync_aggregate::SyncAggregate; -pub use crate::sync_aggregator_selection_data::SyncAggregatorSelectionData; -pub use crate::sync_committee::SyncCommittee; -pub use crate::sync_committee_contribution::{SyncCommitteeContribution, SyncContributionData}; -pub use crate::sync_committee_message::SyncCommitteeMessage; -pub use crate::sync_committee_subscription::SyncCommitteeSubscription; -pub use crate::sync_duty::SyncDuty; -pub use crate::sync_selection_proof::SyncSelectionProof; -pub use crate::sync_subnet_id::SyncSubnetId; -pub use crate::validator::Validator; -pub use crate::validator_registration_data::*; -pub use crate::validator_subscription::ValidatorSubscription; -pub use crate::voluntary_exit::VoluntaryExit; -pub use crate::withdrawal::Withdrawal; -pub use crate::withdrawal_credentials::WithdrawalCredentials; -pub use crate::withdrawal_request::WithdrawalRequest; -pub use fixed_bytes::FixedBytesExtended; +pub mod payload_attestation { + pub use crate::attestation::{ + PayloadAttestation, PayloadAttestationData, PayloadAttestationMessage, + }; +} -pub type CommitteeIndex = u64; -pub type Hash256 = fixed_bytes::Hash256; -pub type Uint256 = fixed_bytes::Uint256; -pub type Address = fixed_bytes::Address; -pub type ForkVersion = [u8; 4]; -pub type BLSFieldElement = Uint256; -pub type Blob = FixedVector::BytesPerBlob>; -// Note on List limit: -// - Deneb to Electra: `MaxBlobCommitmentsPerBlock` -// - Fulu: `MaxCellsPerBlock` -// We choose to use a single type (with the 
larger value from Fulu as `N`) instead of having to -// introduce a new type for Fulu. This is to avoid messy conversions and having to add extra types -// with no gains - as `N` does not impact serialisation at all, and only affects merkleization, -// which we don't current do on `KzgProofs` anyway. -pub type KzgProofs = VariableList::MaxCellsPerBlock>; -pub type VersionedHash = Hash256; -pub type Hash64 = alloy_primitives::B64; +pub mod application_domain { + pub use crate::core::ApplicationDomain; +} -pub use bls::{ - AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, - Signature, SignatureBytes, -}; -pub use context_deserialize::{ContextDeserialize, context_deserialize}; -pub use kzg::{KzgCommitment, KzgProof, VERSIONED_HASH_VERSION_KZG}; -pub use milhouse::{self, List, Vector}; -pub use ssz_types::{BitList, BitVector, FixedVector, VariableList, typenum, typenum::Unsigned}; -pub use superstruct::superstruct; +// Temporary re-exports to maintain backwards compatibility for Lighthouse. 
+pub use crate::kzg_ext::consts::VERSIONED_HASH_VERSION_KZG; +pub use crate::light_client::LightClientError as LightClientUpdateError; +pub use crate::state::BeaconStateError as Error; diff --git a/consensus/types/src/light_client/consts.rs b/consensus/types/src/light_client/consts.rs new file mode 100644 index 00000000000..0092e75e873 --- /dev/null +++ b/consensus/types/src/light_client/consts.rs @@ -0,0 +1,21 @@ +pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; +pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; + +pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; +pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; +pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; + +pub const FINALIZED_ROOT_INDEX: usize = 105; +pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; +pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; +pub const EXECUTION_PAYLOAD_INDEX: usize = 25; + +pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; +pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; +pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; + +// Max light client updates by range request limits +// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration +pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; diff --git a/consensus/types/src/light_client/error.rs b/consensus/types/src/light_client/error.rs new file mode 100644 index 00000000000..4c7a30db5e6 --- /dev/null +++ b/consensus/types/src/light_client/error.rs @@ -0,0 +1,42 @@ +use safe_arith::ArithError; + +use crate::state::BeaconStateError; + +#[derive(Debug, PartialEq, Clone)] +pub enum LightClientError { + SszTypesError(ssz_types::Error), + MilhouseError(milhouse::Error), + BeaconStateError(BeaconStateError), + ArithError(ArithError), + AltairForkNotActive, + NotEnoughSyncCommitteeParticipants, + MismatchingPeriods, + 
InvalidFinalizedBlock, + BeaconBlockBodyError, + InconsistentFork, + GloasNotImplemented, +} + +impl From for LightClientError { + fn from(e: ssz_types::Error) -> LightClientError { + LightClientError::SszTypesError(e) + } +} + +impl From for LightClientError { + fn from(e: BeaconStateError) -> LightClientError { + LightClientError::BeaconStateError(e) + } +} + +impl From for LightClientError { + fn from(e: ArithError) -> LightClientError { + LightClientError::ArithError(e) + } +} + +impl From for LightClientError { + fn from(e: milhouse::Error) -> LightClientError { + LightClientError::MilhouseError(e) + } +} diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client/light_client_bootstrap.rs similarity index 78% rename from consensus/types/src/light_client_bootstrap.rs rename to consensus/types/src/light_client/light_client_bootstrap.rs index 21e805f2c7c..fbcc0ef2b05 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client/light_client_bootstrap.rs @@ -1,36 +1,46 @@ -use crate::context_deserialize; -use crate::{ - BeaconState, ChainSpec, ContextDeserialize, EthSpec, FixedVector, ForkName, Hash256, - LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, - SignedBlindedBeaconBlock, Slot, SyncCommittee, light_client_update::*, test_utils::TestRandom, -}; -use derivative::Derivative; +use std::sync::Arc; + +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use std::sync::Arc; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + 
CurrentSyncCommitteeProofLen, CurrentSyncCommitteeProofLenElectra, LightClientError, + LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + }, + state::BeaconState, + sync_committee::SyncCommittee, + test_utils::TestRandom, +}; + /// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. #[superstruct( - variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -62,8 +72,6 @@ pub struct LightClientBootstrap { pub header: LightClientHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "header_fulu"))] pub header: LightClientHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "header_gloas"))] - pub header: LightClientHeaderGloas, /// The `SyncCommittee` used in the requested period. 
pub current_sync_committee: Arc>, /// Merkle proof for sync committee @@ -73,7 +81,7 @@ pub struct LightClientBootstrap { )] pub current_sync_committee_branch: FixedVector, #[superstruct( - only(Electra, Fulu, Gloas), + only(Electra, Fulu), partial_getter(rename = "current_sync_committee_branch_electra") )] pub current_sync_committee_branch: FixedVector, @@ -90,7 +98,6 @@ impl LightClientBootstrap { Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), Self::Fulu(_) => func(ForkName::Fulu), - Self::Gloas(_) => func(ForkName::Gloas), } } @@ -110,8 +117,8 @@ impl LightClientBootstrap { ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb::from_ssz_bytes(bytes)?), ForkName::Electra => Self::Electra(LightClientBootstrapElectra::from_ssz_bytes(bytes)?), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu::from_ssz_bytes(bytes)?), - ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas::from_ssz_bytes(bytes)?), - ForkName::Base => { + // TODO(gloas): implement Gloas light client + ForkName::Base | ForkName::Gloas => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientBootstrap decoding for {fork_name} not implemented" ))); @@ -132,7 +139,8 @@ impl LightClientBootstrap { ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), ForkName::Fulu => as Encode>::ssz_fixed_len(), - ForkName::Gloas => as Encode>::ssz_fixed_len(), + // TODO(gloas): implement Gloas light client + ForkName::Gloas => as Encode>::ssz_fixed_len(), }; fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -142,42 +150,49 @@ impl LightClientBootstrap { current_sync_committee: Arc>, current_sync_committee_branch: Vec, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let light_client_bootstrap = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? 
{ - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), - }), - ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas { - header: LightClientHeaderGloas::block_to_light_client_header(block)?, - current_sync_committee, - current_sync_committee_branch: 
current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), + // TODO(gloas): implement Gloas light client + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), }; Ok(light_client_bootstrap) @@ -187,45 +202,52 @@ impl LightClientBootstrap { beacon_state: &mut BeaconState, block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let current_sync_committee_branch = beacon_state.compute_current_sync_committee_proof()?; let current_sync_committee = beacon_state.current_sync_committee()?.clone(); let light_client_bootstrap = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => Self::Altair(LightClientBootstrapAltair { header: LightClientHeaderAltair::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), ForkName::Capella => Self::Capella(LightClientBootstrapCapella { header: LightClientHeaderCapella::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb { header: LightClientHeaderDeneb::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + 
.map_err(LightClientError::SszTypesError)?, }), ForkName::Electra => Self::Electra(LightClientBootstrapElectra { header: LightClientHeaderElectra::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { header: LightClientHeaderFulu::block_to_light_client_header(block)?, current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), - }), - ForkName::Gloas => Self::Gloas(LightClientBootstrapGloas { - header: LightClientHeaderGloas::block_to_light_client_header(block)?, - current_sync_committee, - current_sync_committee_branch: current_sync_committee_branch.into(), + current_sync_committee_branch: current_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, }), + // TODO(gloas): implement Gloas light client + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), }; Ok(light_client_bootstrap) @@ -266,7 +288,11 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientBootstrap Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) } ForkName::Gloas => { - Self::Gloas(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ // TODO(EIP-7732): check if this is correct + return Err(serde::de::Error::custom(format!( + "LightClientBootstrap failed to deserialize: unsupported fork '{}'", + context + ))); } }) } @@ -304,10 +330,4 @@ mod tests { use crate::{LightClientBootstrapFulu, MainnetEthSpec}; ssz_tests!(LightClientBootstrapFulu); } - - #[cfg(test)] - mod gloas { - use crate::{LightClientBootstrapGloas, MainnetEthSpec}; - ssz_tests!(LightClientBootstrapGloas); - } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client/light_client_finality_update.rs similarity index 84% rename from consensus/types/src/light_client_finality_update.rs rename to consensus/types/src/light_client/light_client_finality_update.rs index 4fa98de40be..b503785b851 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client/light_client_finality_update.rs @@ -1,36 +1,42 @@ -use super::{EthSpec, FixedVector, Hash256, LightClientHeader, Slot, SyncAggregate}; -use crate::ChainSpec; -use crate::context_deserialize; -use crate::{ - ContextDeserialize, ForkName, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, - LightClientHeaderGloas, SignedBlindedBeaconBlock, light_client_update::*, - test_utils::TestRandom, -}; -use derivative::Derivative; +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + FinalizedRootProofLen, FinalizedRootProofLenElectra, LightClientError, LightClientHeader, + LightClientHeaderAltair, LightClientHeaderCapella, 
LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, + }, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; + #[superstruct( - variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -62,8 +68,6 @@ pub struct LightClientFinalityUpdate { pub attested_header: LightClientHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] pub attested_header: LightClientHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "attested_header_gloas"))] - pub attested_header: LightClientHeaderGloas, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -75,8 +79,6 @@ pub struct LightClientFinalityUpdate { pub finalized_header: LightClientHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "finalized_header_fulu"))] pub finalized_header: LightClientHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "finalized_header_gloas"))] - pub finalized_header: LightClientHeaderGloas, /// Merkle proof attesting finalized header. 
#[superstruct( only(Altair, Capella, Deneb), @@ -84,7 +86,7 @@ pub struct LightClientFinalityUpdate { )] pub finality_branch: FixedVector, #[superstruct( - only(Electra, Fulu, Gloas), + only(Electra, Fulu), partial_getter(rename = "finality_branch_electra") )] pub finality_branch: FixedVector, @@ -103,10 +105,10 @@ impl LightClientFinalityUpdate { sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let finality_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientFinalityUpdateAltair { @@ -116,7 +118,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderAltair::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }) @@ -128,7 +132,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderCapella::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -139,7 +145,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderDeneb::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ -150,7 +158,9 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderElectra::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), @@ 
-161,23 +171,14 @@ impl LightClientFinalityUpdate { finalized_header: LightClientHeaderFulu::block_to_light_client_header( finalized_block, )?, - finality_branch: finality_branch.into(), - sync_aggregate, - signature_slot, - }), - ForkName::Gloas => Self::Gloas(LightClientFinalityUpdateGloas { - attested_header: LightClientHeaderGloas::block_to_light_client_header( - attested_block, - )?, - finalized_header: LightClientHeaderGloas::block_to_light_client_header( - finalized_block, - )?, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate, signature_slot, }), - - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), }; Ok(finality_update) @@ -193,7 +194,6 @@ impl LightClientFinalityUpdate { Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), Self::Fulu(_) => func(ForkName::Fulu), - Self::Gloas(_) => func(ForkName::Gloas), } } @@ -231,8 +231,8 @@ impl LightClientFinalityUpdate { Self::Electra(LightClientFinalityUpdateElectra::from_ssz_bytes(bytes)?) 
} ForkName::Fulu => Self::Fulu(LightClientFinalityUpdateFulu::from_ssz_bytes(bytes)?), - ForkName::Gloas => Self::Gloas(LightClientFinalityUpdateGloas::from_ssz_bytes(bytes)?), - ForkName::Base => { + // TODO(gloas): implement Gloas light client + ForkName::Base | ForkName::Gloas => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientFinalityUpdate decoding for {fork_name} not implemented" ))); @@ -253,7 +253,8 @@ impl LightClientFinalityUpdate { ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), ForkName::Fulu => as Encode>::ssz_fixed_len(), - ForkName::Gloas => as Encode>::ssz_fixed_len(), + // TODO(gloas): implement Gloas light client + ForkName::Gloas => 0, }; // `2 *` because there are two headers in the update fixed_size + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) @@ -307,7 +308,11 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientFinalityU Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) } ForkName::Gloas => { - Self::Gloas(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
+ // TODO(EIP-7732): check if this is correct + return Err(serde::de::Error::custom(format!( + "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", + context + ))); } }) } @@ -345,10 +350,4 @@ mod tests { use crate::{LightClientFinalityUpdateFulu, MainnetEthSpec}; ssz_tests!(LightClientFinalityUpdateFulu); } - - #[cfg(test)] - mod gloas { - use crate::{LightClientFinalityUpdateGloas, MainnetEthSpec}; - ssz_tests!(LightClientFinalityUpdateGloas); - } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client/light_client_header.rs similarity index 78% rename from consensus/types/src/light_client_header.rs rename to consensus/types/src/light_client/light_client_header.rs index 162203138ab..fdf9f234efc 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client/light_client_header.rs @@ -1,37 +1,42 @@ -use crate::ChainSpec; -use crate::context_deserialize; -use crate::{BeaconBlockBody, light_client_update::*}; -use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; -use crate::{ContextDeserialize, ForkName}; -use crate::{ - EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderGloas, - FixedVector, Hash256, SignedBlindedBeaconBlock, test_utils::TestRandom, -}; -use derivative::Derivative; +use std::marker::PhantomData; + +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::Decode; use ssz_derive::{Decode, Encode}; -use std::marker::PhantomData; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + block::{BeaconBlockBody, BeaconBlockHeader, SignedBlindedBeaconBlock}, + core::{ChainSpec, EthSpec, Hash256}, + execution::{ + ExecutionPayloadHeader, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, 
ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + }, + fork::ForkName, + light_client::{ExecutionPayloadProofLen, LightClientError, consts::EXECUTION_PAYLOAD_INDEX}, + test_utils::TestRandom, +}; + #[superstruct( - variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), + variants(Altair, Capella, Deneb, Electra, Fulu,), variant_attributes( derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -68,10 +73,8 @@ pub struct LightClientHeader { pub execution: ExecutionPayloadHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_header_fulu"))] pub execution: ExecutionPayloadHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "execution_payload_header_gloas"))] - pub execution: ExecutionPayloadHeaderGloas, - #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub execution_branch: FixedVector, #[ssz(skip_serializing, skip_deserializing)] @@ -85,12 +88,12 @@ impl LightClientHeader { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let header = match block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), ForkName::Altair | ForkName::Bellatrix => LightClientHeader::Altair( LightClientHeaderAltair::block_to_light_client_header(block)?, ), @@ -106,9 +109,8 @@ impl LightClientHeader { ForkName::Fulu => { LightClientHeader::Fulu(LightClientHeaderFulu::block_to_light_client_header(block)?) 
} - ForkName::Gloas => LightClientHeader::Gloas( - LightClientHeaderGloas::block_to_light_client_header(block)?, - ), + // TODO(gloas): implement Gloas light client + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), }; Ok(header) } @@ -130,10 +132,8 @@ impl LightClientHeader { ForkName::Fulu => { LightClientHeader::Fulu(LightClientHeaderFulu::from_ssz_bytes(bytes)?) } - ForkName::Gloas => { - LightClientHeader::Gloas(LightClientHeaderGloas::from_ssz_bytes(bytes)?) - } - ForkName::Base => { + // TODO(gloas): implement Gloas light client + ForkName::Base | ForkName::Gloas => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientHeader decoding for {fork_name} not implemented" ))); @@ -152,7 +152,10 @@ impl LightClientHeader { } pub fn ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { - if fork_name.capella_enabled() { + if fork_name.gloas_enabled() { + // TODO(EIP7732): check this + 0 + } else if fork_name.capella_enabled() { ExecutionPayloadHeader::::ssz_max_var_len_for_fork(fork_name) } else { 0 @@ -163,7 +166,7 @@ impl LightClientHeader { impl LightClientHeaderAltair { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { Ok(LightClientHeaderAltair { beacon: block.message().block_header(), _phantom_data: PhantomData, @@ -183,7 +186,7 @@ impl Default for LightClientHeaderAltair { impl LightClientHeaderCapella { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -194,7 +197,7 @@ impl LightClientHeaderCapella { block .message() .body_capella() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? 
.to_owned(), ); @@ -225,7 +228,7 @@ impl Default for LightClientHeaderCapella { impl LightClientHeaderDeneb { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let header = block .message() .execution_payload()? @@ -236,7 +239,7 @@ impl LightClientHeaderDeneb { block .message() .body_deneb() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -267,7 +270,7 @@ impl Default for LightClientHeaderDeneb { impl LightClientHeaderElectra { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -278,7 +281,7 @@ impl LightClientHeaderElectra { block .message() .body_electra() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -309,7 +312,7 @@ impl Default for LightClientHeaderElectra { impl LightClientHeaderFulu { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, - ) -> Result { + ) -> Result { let payload = block .message() .execution_payload()? @@ -320,7 +323,7 @@ impl LightClientHeaderFulu { block .message() .body_fulu() - .map_err(|_| Error::BeaconBlockBodyError)? + .map_err(|_| LightClientError::BeaconBlockBodyError)? .to_owned(), ); @@ -348,48 +351,6 @@ impl Default for LightClientHeaderFulu { } } -impl LightClientHeaderGloas { - pub fn block_to_light_client_header( - block: &SignedBlindedBeaconBlock, - ) -> Result { - let payload = block - .message() - .execution_payload()? - .execution_payload_gloas()?; - - let header = ExecutionPayloadHeaderGloas::from(payload); - let beacon_block_body = BeaconBlockBody::from( - block - .message() - .body_gloas() - .map_err(|_| Error::BeaconBlockBodyError)? 
- .to_owned(), - ); - - let execution_branch = beacon_block_body - .to_ref() - .block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; - - Ok(LightClientHeaderGloas { - beacon: block.message().block_header(), - execution: header, - execution_branch: FixedVector::new(execution_branch)?, - _phantom_data: PhantomData, - }) - } -} - -impl Default for LightClientHeaderGloas { - fn default() -> Self { - Self { - beacon: BeaconBlockHeader::empty(), - execution: ExecutionPayloadHeaderGloas::default(), - execution_branch: FixedVector::default(), - _phantom_data: PhantomData, - } - } -} - impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientHeader { fn context_deserialize(deserializer: D, context: ForkName) -> Result where @@ -402,7 +363,8 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientHeader )) }; Ok(match context { - ForkName::Base => { + // TODO(gloas): implement Gloas light client + ForkName::Base | ForkName::Gloas => { return Err(serde::de::Error::custom(format!( "LightClientFinalityUpdate failed to deserialize: unsupported fork '{}'", context @@ -423,9 +385,6 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientHeader ForkName::Fulu => { Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) } - ForkName::Gloas => { - Self::Gloas(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
- } }) } } @@ -462,10 +421,4 @@ mod tests { use crate::{LightClientHeaderFulu, MainnetEthSpec}; ssz_tests!(LightClientHeaderFulu); } - - #[cfg(test)] - mod gloas { - use crate::{LightClientHeaderGloas, MainnetEthSpec}; - ssz_tests!(LightClientHeaderGloas); - } } diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client/light_client_optimistic_update.rs similarity index 85% rename from consensus/types/src/light_client_optimistic_update.rs rename to consensus/types/src/light_client/light_client_optimistic_update.rs index 7528322d567..139c4b6a08b 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client/light_client_optimistic_update.rs @@ -1,38 +1,42 @@ -use super::{ContextDeserialize, EthSpec, ForkName, LightClientHeader, Slot, SyncAggregate}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ - ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, LightClientHeaderFulu, LightClientHeaderGloas, - SignedBlindedBeaconBlock, light_client_update::*, -}; -use derivative::Derivative; +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; -use ssz_derive::Decode; -use ssz_derive::Encode; +use ssz_derive::{Decode, Encode}; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash::Hash256; use tree_hash_derive::TreeHash; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, EthSpec, Slot}, + fork::ForkName, + light_client::{ + LightClientError, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + }, + sync_committee::SyncAggregate, + test_utils::TestRandom, +}; + /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the 
current unfinalized epoch is verified only against BLS signature. #[superstruct( - variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -64,8 +68,6 @@ pub struct LightClientOptimisticUpdate { pub attested_header: LightClientHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] pub attested_header: LightClientHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "attested_header_gloas"))] - pub attested_header: LightClientHeaderGloas, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -79,10 +81,10 @@ impl LightClientOptimisticUpdate { sync_aggregate: SyncAggregate, signature_slot: Slot, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let optimistic_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? 
{ ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientOptimisticUpdateAltair { @@ -121,14 +123,8 @@ impl LightClientOptimisticUpdate { sync_aggregate, signature_slot, }), - ForkName::Gloas => Self::Gloas(LightClientOptimisticUpdateGloas { - attested_header: LightClientHeaderGloas::block_to_light_client_header( - attested_block, - )?, - sync_aggregate, - signature_slot, - }), - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), }; Ok(optimistic_update) @@ -144,7 +140,6 @@ impl LightClientOptimisticUpdate { Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), Self::Fulu(_) => func(ForkName::Fulu), - Self::Gloas(_) => func(ForkName::Gloas), } } @@ -184,10 +179,8 @@ impl LightClientOptimisticUpdate { Self::Electra(LightClientOptimisticUpdateElectra::from_ssz_bytes(bytes)?) } ForkName::Fulu => Self::Fulu(LightClientOptimisticUpdateFulu::from_ssz_bytes(bytes)?), - ForkName::Gloas => { - Self::Gloas(LightClientOptimisticUpdateGloas::from_ssz_bytes(bytes)?) - } - ForkName::Base => { + // TODO(gloas): implement Gloas light client + ForkName::Base | ForkName::Gloas => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientOptimisticUpdate decoding for {fork_name} not implemented" ))); @@ -208,7 +201,8 @@ impl LightClientOptimisticUpdate { ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), ForkName::Fulu => as Encode>::ssz_fixed_len(), - ForkName::Gloas => as Encode>::ssz_fixed_len(), + // TODO(gloas): implement Gloas light client + ForkName::Gloas => 0, }; fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -261,7 +255,11 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientOptimisti Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
} ForkName::Gloas => { - Self::Gloas(Deserialize::deserialize(deserializer).map_err(convert_err)?) + // TODO(EIP-7732): check if this is correct + return Err(serde::de::Error::custom(format!( + "LightClientBootstrap failed to deserialize: unsupported fork '{}'", + context + ))); } }) } @@ -299,10 +297,4 @@ mod tests { use crate::{LightClientOptimisticUpdateFulu, MainnetEthSpec}; ssz_tests!(LightClientOptimisticUpdateFulu); } - - #[cfg(test)] - mod gloas { - use crate::{LightClientOptimisticUpdateGloas, MainnetEthSpec}; - ssz_tests!(LightClientOptimisticUpdateGloas); - } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client/light_client_update.rs similarity index 79% rename from consensus/types/src/light_client_update.rs rename to consensus/types/src/light_client/light_client_update.rs index bf1a8c614a7..cd33f6ae547 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client/light_client_update.rs @@ -1,33 +1,30 @@ -use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; -use crate::LightClientHeader; -use crate::context_deserialize; -use crate::light_client_header::LightClientHeaderElectra; -use crate::{ - ChainSpec, ContextDeserialize, Epoch, ForkName, LightClientHeaderAltair, - LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderFulu, - LightClientHeaderGloas, SignedBlindedBeaconBlock, beacon_state, test_utils::TestRandom, -}; -use derivative::Derivative; +use std::sync::Arc; + +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; use safe_arith::ArithError; use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::Decode; use ssz_derive::Encode; -use ssz_types::typenum::{U4, U5, U6, U7}; -use std::sync::Arc; +use ssz_types::FixedVector; use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use typenum::{U4, U5, 
U6, U7}; -pub const FINALIZED_ROOT_INDEX: usize = 105; -pub const CURRENT_SYNC_COMMITTEE_INDEX: usize = 54; -pub const NEXT_SYNC_COMMITTEE_INDEX: usize = 55; -pub const EXECUTION_PAYLOAD_INDEX: usize = 25; - -pub const FINALIZED_ROOT_INDEX_ELECTRA: usize = 169; -pub const CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 86; -pub const NEXT_SYNC_COMMITTEE_INDEX_ELECTRA: usize = 87; +use crate::{ + block::SignedBlindedBeaconBlock, + core::{ChainSpec, Epoch, EthSpec, Hash256, Slot}, + fork::ForkName, + light_client::{ + LightClientError, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + }, + sync_committee::{SyncAggregate, SyncCommittee}, + test_utils::TestRandom, +}; pub type FinalizedRootProofLen = U6; pub type CurrentSyncCommitteeProofLen = U5; @@ -38,82 +35,30 @@ pub type FinalizedRootProofLenElectra = U7; pub type CurrentSyncCommitteeProofLenElectra = U6; pub type NextSyncCommitteeProofLenElectra = U6; -pub const FINALIZED_ROOT_PROOF_LEN: usize = 6; -pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; -pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; -pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; - -pub const FINALIZED_ROOT_PROOF_LEN_ELECTRA: usize = 7; -pub const NEXT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; -pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN_ELECTRA: usize = 6; - -pub type MerkleProof = Vec; -// Max light client updates by range request limits -// spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/light-client/p2p-interface.md#configuration -pub const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; - type FinalityBranch = FixedVector; type FinalityBranchElectra = FixedVector; type NextSyncCommitteeBranch = FixedVector; type NextSyncCommitteeBranchElectra = FixedVector; -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - SszTypesError(ssz_types::Error), - MilhouseError(milhouse::Error), - 
BeaconStateError(beacon_state::Error), - ArithError(ArithError), - AltairForkNotActive, - NotEnoughSyncCommitteeParticipants, - MismatchingPeriods, - InvalidFinalizedBlock, - BeaconBlockBodyError, - InconsistentFork, -} - -impl From for Error { - fn from(e: ssz_types::Error) -> Error { - Error::SszTypesError(e) - } -} - -impl From for Error { - fn from(e: beacon_state::Error) -> Error { - Error::BeaconStateError(e) - } -} - -impl From for Error { - fn from(e: ArithError) -> Error { - Error::ArithError(e) - } -} - -impl From for Error { - fn from(e: milhouse::Error) -> Error { - Error::MilhouseError(e) - } -} - /// A LightClientUpdate is the update we request solely to either complete the bootstrapping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. #[superstruct( - variants(Altair, Capella, Deneb, Electra, Fulu, Gloas), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, Clone, - PartialEq, Serialize, Deserialize, - Derivative, + Educe, Decode, Encode, TestRandom, TreeHash, ), + educe(PartialEq), serde(bound = "E: EthSpec", deny_unknown_fields), cfg_attr( feature = "arbitrary", @@ -145,8 +90,6 @@ pub struct LightClientUpdate { pub attested_header: LightClientHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] pub attested_header: LightClientHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "attested_header_gloas"))] - pub attested_header: LightClientHeaderGloas, /// The `SyncCommittee` used in the next period. 
pub next_sync_committee: Arc>, // Merkle proof for next sync committee @@ -156,7 +99,7 @@ pub struct LightClientUpdate { )] pub next_sync_committee_branch: NextSyncCommitteeBranch, #[superstruct( - only(Electra, Fulu, Gloas), + only(Electra, Fulu), partial_getter(rename = "next_sync_committee_branch_electra") )] pub next_sync_committee_branch: NextSyncCommitteeBranchElectra, @@ -171,8 +114,6 @@ pub struct LightClientUpdate { pub finalized_header: LightClientHeaderElectra, #[superstruct(only(Fulu), partial_getter(rename = "finalized_header_fulu"))] pub finalized_header: LightClientHeaderFulu, - #[superstruct(only(Gloas), partial_getter(rename = "finalized_header_gloas"))] - pub finalized_header: LightClientHeaderGloas, /// Merkle proof attesting finalized header. #[superstruct( only(Altair, Capella, Deneb), @@ -180,7 +121,7 @@ pub struct LightClientUpdate { )] pub finality_branch: FinalityBranch, #[superstruct( - only(Electra, Fulu, Gloas), + only(Electra, Fulu), partial_getter(rename = "finality_branch_electra") )] pub finality_branch: FinalityBranchElectra, @@ -199,7 +140,8 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientUpdate serde::de::Error::custom(format!("LightClientUpdate failed to deserialize: {:?}", e)) }; Ok(match context { - ForkName::Base => { + // TODO(gloas): implement Gloas light client + ForkName::Base | ForkName::Gloas => { return Err(serde::de::Error::custom(format!( "LightClientUpdate failed to deserialize: unsupported fork '{}'", context @@ -220,9 +162,6 @@ impl<'de, E: EthSpec> ContextDeserialize<'de, ForkName> for LightClientUpdate ForkName::Fulu => { Self::Fulu(Deserialize::deserialize(deserializer).map_err(convert_err)?) } - ForkName::Gloas => { - Self::Gloas(Deserialize::deserialize(deserializer).map_err(convert_err)?) 
- } }) } } @@ -238,12 +177,12 @@ impl LightClientUpdate { attested_block: &SignedBlindedBeaconBlock, finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { let light_client_update = match attested_block .fork_name(chain_spec) - .map_err(|_| Error::InconsistentFork)? + .map_err(|_| LightClientError::InconsistentFork)? { - ForkName::Base => return Err(Error::AltairForkNotActive), + ForkName::Base => return Err(LightClientError::AltairForkNotActive), fork_name @ ForkName::Altair | fork_name @ ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; @@ -261,9 +200,13 @@ impl LightClientUpdate { Self::Altair(LightClientUpdateAltair { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -285,9 +228,13 @@ impl LightClientUpdate { Self::Capella(LightClientUpdateCapella { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -309,9 +256,13 @@ impl LightClientUpdate { Self::Deneb(LightClientUpdateDeneb { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + 
.map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -333,9 +284,13 @@ impl LightClientUpdate { Self::Electra(LightClientUpdateElectra { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) @@ -357,39 +312,22 @@ impl LightClientUpdate { Self::Fulu(LightClientUpdateFulu { attested_header, next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), + next_sync_committee_branch: next_sync_committee_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, finalized_header, - finality_branch: finality_branch.into(), + finality_branch: finality_branch + .try_into() + .map_err(LightClientError::SszTypesError)?, sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) } - fork_name @ ForkName::Gloas => { - let attested_header = - LightClientHeaderGloas::block_to_light_client_header(attested_block)?; - - let finalized_header = if let Some(finalized_block) = finalized_block { - if finalized_block.fork_name_unchecked() == fork_name { - LightClientHeaderGloas::block_to_light_client_header(finalized_block)? 
- } else { - LightClientHeaderGloas::default() - } - } else { - LightClientHeaderGloas::default() - }; - - Self::Gloas(LightClientUpdateGloas { - attested_header, - next_sync_committee, - next_sync_committee_branch: next_sync_committee_branch.into(), - finalized_header, - finality_branch: finality_branch.into(), - sync_aggregate: sync_aggregate.clone(), - signature_slot: block_slot, - }) - } // To add a new fork, just append the new fork variant on the latest fork. Forks that - // have a distinct execution header will need a new LightClientUpdate variant only - // if you need to test or support lightclient usages + // To add a new fork, just append the new fork variant on the latest fork. Forks that + // have a distinct execution header will need a new LightClientUpdate variant only + // if you need to test or support lightclient usages + // TODO(gloas): implement Gloas light client + ForkName::Gloas => return Err(LightClientError::GloasNotImplemented), }; Ok(light_client_update) @@ -404,8 +342,8 @@ impl LightClientUpdate { ForkName::Deneb => Self::Deneb(LightClientUpdateDeneb::from_ssz_bytes(bytes)?), ForkName::Electra => Self::Electra(LightClientUpdateElectra::from_ssz_bytes(bytes)?), ForkName::Fulu => Self::Fulu(LightClientUpdateFulu::from_ssz_bytes(bytes)?), - ForkName::Gloas => Self::Gloas(LightClientUpdateGloas::from_ssz_bytes(bytes)?), - ForkName::Base => { + // TODO(gloas): implement Gloas light client + ForkName::Base | ForkName::Gloas => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientUpdate decoding for {fork_name} not implemented" ))); @@ -422,7 +360,6 @@ impl LightClientUpdate { LightClientUpdate::Deneb(update) => update.attested_header.beacon.slot, LightClientUpdate::Electra(update) => update.attested_header.beacon.slot, LightClientUpdate::Fulu(update) => update.attested_header.beacon.slot, - LightClientUpdate::Gloas(update) => update.attested_header.beacon.slot, } } @@ -433,30 +370,38 @@ impl LightClientUpdate { 
LightClientUpdate::Deneb(update) => update.finalized_header.beacon.slot, LightClientUpdate::Electra(update) => update.finalized_header.beacon.slot, LightClientUpdate::Fulu(update) => update.finalized_header.beacon.slot, - LightClientUpdate::Gloas(update) => update.finalized_header.beacon.slot, } } fn attested_header_sync_committee_period( &self, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { compute_sync_committee_period_at_slot::(self.attested_header_slot(), chain_spec) - .map_err(Error::ArithError) + .map_err(LightClientError::ArithError) } - fn signature_slot_sync_committee_period(&self, chain_spec: &ChainSpec) -> Result { + fn signature_slot_sync_committee_period( + &self, + chain_spec: &ChainSpec, + ) -> Result { compute_sync_committee_period_at_slot::(*self.signature_slot(), chain_spec) - .map_err(Error::ArithError) + .map_err(LightClientError::ArithError) } - pub fn is_sync_committee_update(&self, chain_spec: &ChainSpec) -> Result { + pub fn is_sync_committee_update( + &self, + chain_spec: &ChainSpec, + ) -> Result { Ok(!self.is_next_sync_committee_branch_empty() && (self.attested_header_sync_committee_period(chain_spec)? == self.signature_slot_sync_committee_period(chain_spec)?)) } - pub fn has_sync_committee_finality(&self, chain_spec: &ChainSpec) -> Result { + pub fn has_sync_committee_finality( + &self, + chain_spec: &ChainSpec, + ) -> Result { Ok( compute_sync_committee_period_at_slot::(self.finalized_header_slot(), chain_spec)? 
== self.attested_header_sync_committee_period(chain_spec)?, @@ -470,7 +415,7 @@ impl LightClientUpdate { &self, new: &Self, chain_spec: &ChainSpec, - ) -> Result { + ) -> Result { // Compare super majority (> 2/3) sync committee participation let max_active_participants = new.sync_aggregate().sync_committee_bits.len(); @@ -554,7 +499,8 @@ impl LightClientUpdate { ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), ForkName::Fulu => as Encode>::ssz_fixed_len(), - ForkName::Gloas => as Encode>::ssz_fixed_len(), + // TODO(gloas): implement Gloas light client + ForkName::Gloas => 0, }; fixed_len + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -569,7 +515,6 @@ impl LightClientUpdate { Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), Self::Fulu(_) => func(ForkName::Fulu), - Self::Gloas(_) => func(ForkName::Gloas), } } } @@ -594,7 +539,8 @@ fn compute_sync_committee_period_at_slot( #[cfg(test)] mod tests { use super::*; - use ssz_types::typenum::Unsigned; + use crate::light_client::consts::*; + use typenum::Unsigned; // `ssz_tests!` can only be defined once per namespace #[cfg(test)] @@ -632,13 +578,6 @@ mod tests { ssz_tests!(LightClientUpdateFulu); } - #[cfg(test)] - mod gloas { - use super::*; - use crate::MainnetEthSpec; - ssz_tests!(LightClientUpdateGloas); - } - #[test] fn finalized_root_params() { assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); diff --git a/consensus/types/src/light_client/mod.rs b/consensus/types/src/light_client/mod.rs new file mode 100644 index 00000000000..24f3fdbb55e --- /dev/null +++ b/consensus/types/src/light_client/mod.rs @@ -0,0 +1,35 @@ +mod error; +mod light_client_bootstrap; +mod light_client_finality_update; +mod light_client_header; +mod light_client_optimistic_update; +mod light_client_update; + +pub mod consts; + +pub use error::LightClientError; +pub use light_client_bootstrap::{ + 
LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, + LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, +}; +pub use light_client_finality_update::{ + LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, + LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, + LightClientFinalityUpdateFulu, +}; +pub use light_client_header::{ + LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + LightClientHeaderElectra, LightClientHeaderFulu, +}; +pub use light_client_optimistic_update::{ + LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, + LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, + LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, +}; +pub use light_client_update::{ + CurrentSyncCommitteeProofLen, CurrentSyncCommitteeProofLenElectra, ExecutionPayloadProofLen, + FinalizedRootProofLen, FinalizedRootProofLenElectra, LightClientUpdate, + LightClientUpdateAltair, LightClientUpdateCapella, LightClientUpdateDeneb, + LightClientUpdateElectra, LightClientUpdateFulu, NextSyncCommitteeProofLen, + NextSyncCommitteeProofLenElectra, +}; diff --git a/consensus/types/src/runtime_fixed_vector.rs b/consensus/types/src/runtime_fixed_vector.rs deleted file mode 100644 index f562322a3df..00000000000 --- a/consensus/types/src/runtime_fixed_vector.rs +++ /dev/null @@ -1,90 +0,0 @@ -//! Emulates a fixed size array but with the length set at runtime. -//! -//! The length of the list cannot be changed once it is set. 
- -use std::fmt; -use std::fmt::Debug; - -#[derive(Clone)] -pub struct RuntimeFixedVector { - vec: Vec, - len: usize, -} - -impl Debug for RuntimeFixedVector { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?} (len={})", self.vec, self.len) - } -} - -impl RuntimeFixedVector { - pub fn new(vec: Vec) -> Self { - let len = vec.len(); - Self { vec, len } - } - - pub fn to_vec(&self) -> Vec { - self.vec.clone() - } - - pub fn as_slice(&self) -> &[T] { - self.vec.as_slice() - } - - #[allow(clippy::len_without_is_empty)] - pub fn len(&self) -> usize { - self.len - } - - pub fn into_vec(self) -> Vec { - self.vec - } - - pub fn default(max_len: usize) -> Self { - Self { - vec: vec![T::default(); max_len], - len: max_len, - } - } - - pub fn take(&mut self) -> Self { - let new = std::mem::take(&mut self.vec); - *self = Self::new(vec![T::default(); self.len]); - Self { - vec: new, - len: self.len, - } - } -} - -impl std::ops::Deref for RuntimeFixedVector { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] - } -} - -impl std::ops::DerefMut for RuntimeFixedVector { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - -impl IntoIterator for RuntimeFixedVector { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl<'a, T> IntoIterator for &'a RuntimeFixedVector { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.vec.iter() - } -} diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs deleted file mode 100644 index d57c65b1b71..00000000000 --- a/consensus/types/src/runtime_var_list.rs +++ /dev/null @@ -1,387 +0,0 @@ -use crate::ContextDeserialize; -use derivative::Derivative; -use serde::de::Error as DeError; -use serde::{Deserialize, Deserializer, Serialize}; -use ssz::Decode; -use ssz_types::Error; -use std::fmt; -use std::fmt::Debug; -use std::ops::{Deref, Index, IndexMut}; -use std::slice::SliceIndex; -use tree_hash::{Hash256, MerkleHasher, PackedEncoding, TreeHash, TreeHashType}; - -/// Emulates a SSZ `List`. -/// -/// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than -/// `max_len` values. -/// -/// To ensure there are no inconsistent states, we do not allow any mutating operation if `max_len` is not set. -/// -/// ## Example -/// -/// ``` -/// use types::{RuntimeVariableList}; -/// -/// let base: Vec = vec![1, 2, 3, 4]; -/// -/// // Create a `RuntimeVariableList` from a `Vec` that has the expected length. -/// let exact: RuntimeVariableList<_> = RuntimeVariableList::new(base.clone(), 4).unwrap(); -/// assert_eq!(&exact[..], &[1, 2, 3, 4]); -/// -/// // Create a `RuntimeVariableList` from a `Vec` that is too long you'll get an error. -/// let err = RuntimeVariableList::new(base.clone(), 3).unwrap_err(); -/// assert_eq!(err, ssz_types::Error::OutOfBounds { i: 4, len: 3 }); -/// -/// // Create a `RuntimeVariableList` from a `Vec` that is shorter than the maximum. 
-/// let mut long: RuntimeVariableList<_> = RuntimeVariableList::new(base, 5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4]); -/// -/// // Push a value to if it does not exceed the maximum -/// long.push(5).unwrap(); -/// assert_eq!(&long[..], &[1, 2, 3, 4, 5]); -/// -/// // Push a value to if it _does_ exceed the maximum. -/// assert!(long.push(6).is_err()); -/// -/// ``` -#[derive(Clone, Serialize, Deserialize, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] -#[serde(transparent)] -pub struct RuntimeVariableList { - vec: Vec, - #[serde(skip)] - max_len: usize, -} - -impl Debug for RuntimeVariableList { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?} (max_len={})", self.vec, self.max_len) - } -} - -impl RuntimeVariableList { - /// Returns `Ok` if the given `vec` equals the fixed length of `Self`. Otherwise returns - /// `Err(OutOfBounds { .. })`. - pub fn new(vec: Vec, max_len: usize) -> Result { - if vec.len() <= max_len { - Ok(Self { vec, max_len }) - } else { - Err(Error::OutOfBounds { - i: vec.len(), - len: max_len, - }) - } - } - - /// Create an empty list with the given `max_len`. - pub fn empty(max_len: usize) -> Self { - Self { - vec: vec![], - max_len, - } - } - - pub fn as_slice(&self) -> &[T] { - self.vec.as_slice() - } - - pub fn as_mut_slice(&mut self) -> &mut [T] { - self.vec.as_mut_slice() - } - - /// Returns the number of values presently in `self`. - pub fn len(&self) -> usize { - self.vec.len() - } - - /// True if `self` does not contain any values. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns the type-level maximum length. - /// - /// Returns `None` if self is uninitialized with a max_len. - pub fn max_len(&self) -> usize { - self.max_len - } - - /// Appends `value` to the back of `self`. - /// - /// Returns `Err(())` when appending `value` would exceed the maximum length. 
- pub fn push(&mut self, value: T) -> Result<(), Error> { - if self.vec.len() < self.max_len { - self.vec.push(value); - Ok(()) - } else { - Err(Error::OutOfBounds { - i: self.vec.len().saturating_add(1), - len: self.max_len, - }) - } - } -} - -impl RuntimeVariableList { - pub fn from_ssz_bytes(bytes: &[u8], max_len: usize) -> Result { - let vec = if bytes.is_empty() { - vec![] - } else if ::is_ssz_fixed_len() { - let num_items = bytes - .len() - .checked_div(::ssz_fixed_len()) - .ok_or(ssz::DecodeError::ZeroLengthItem)?; - - if num_items > max_len { - return Err(ssz::DecodeError::BytesInvalid(format!( - "RuntimeVariableList of {} items exceeds maximum of {}", - num_items, max_len - ))); - } - - bytes.chunks(::ssz_fixed_len()).try_fold( - Vec::with_capacity(num_items), - |mut vec, chunk| { - vec.push(::from_ssz_bytes(chunk)?); - Ok(vec) - }, - )? - } else { - ssz::decode_list_of_variable_length_items(bytes, Some(max_len))? - }; - Ok(Self { vec, max_len }) - } -} - -impl From> for Vec { - fn from(list: RuntimeVariableList) -> Vec { - list.vec - } -} - -impl> Index for RuntimeVariableList { - type Output = I::Output; - - #[inline] - fn index(&self, index: I) -> &Self::Output { - Index::index(&self.vec, index) - } -} - -impl> IndexMut for RuntimeVariableList { - #[inline] - fn index_mut(&mut self, index: I) -> &mut Self::Output { - IndexMut::index_mut(&mut self.vec, index) - } -} - -impl Deref for RuntimeVariableList { - type Target = [T]; - - fn deref(&self) -> &[T] { - &self.vec[..] 
- } -} - -impl<'a, T> IntoIterator for &'a RuntimeVariableList { - type Item = &'a T; - type IntoIter = std::slice::Iter<'a, T>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl IntoIterator for RuntimeVariableList { - type Item = T; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.vec.into_iter() - } -} - -impl ssz::Encode for RuntimeVariableList -where - T: ssz::Encode, -{ - fn is_ssz_fixed_len() -> bool { - >::is_ssz_fixed_len() - } - - fn ssz_append(&self, buf: &mut Vec) { - self.vec.ssz_append(buf) - } - - fn ssz_fixed_len() -> usize { - >::ssz_fixed_len() - } - - fn ssz_bytes_len(&self) -> usize { - self.vec.ssz_bytes_len() - } -} - -impl<'de, C, T> ContextDeserialize<'de, (C, usize)> for RuntimeVariableList -where - T: ContextDeserialize<'de, C>, - C: Clone, -{ - fn context_deserialize(deserializer: D, context: (C, usize)) -> Result - where - D: Deserializer<'de>, - { - // first parse out a Vec using the Vec impl you already have - let vec: Vec = Vec::context_deserialize(deserializer, context.0)?; - let vec_len = vec.len(); - RuntimeVariableList::new(vec, context.1).map_err(|e| { - DeError::custom(format!( - "RuntimeVariableList length {} exceeds max_len {}: {e:?}", - vec_len, context.1, - )) - }) - } -} - -impl TreeHash for RuntimeVariableList { - fn tree_hash_type() -> tree_hash::TreeHashType { - tree_hash::TreeHashType::List - } - - fn tree_hash_packed_encoding(&self) -> PackedEncoding { - unreachable!("List should never be packed.") - } - - fn tree_hash_packing_factor() -> usize { - unreachable!("List should never be packed.") - } - - fn tree_hash_root(&self) -> Hash256 { - let root = runtime_vec_tree_hash_root::(&self.vec, self.max_len); - - tree_hash::mix_in_length(&root, self.len()) - } -} - -// We can delete this once the upstream `vec_tree_hash_root` is modified to use a runtime max len. 
-pub fn runtime_vec_tree_hash_root(vec: &[T], max_len: usize) -> Hash256 -where - T: TreeHash, -{ - match T::tree_hash_type() { - TreeHashType::Basic => { - let mut hasher = - MerkleHasher::with_leaves(max_len.div_ceil(T::tree_hash_packing_factor())); - - for item in vec { - hasher - .write(&item.tree_hash_packed_encoding()) - .expect("ssz_types variable vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types variable vec should not have a remaining buffer") - } - TreeHashType::Container | TreeHashType::List | TreeHashType::Vector => { - let mut hasher = MerkleHasher::with_leaves(max_len); - - for item in vec { - hasher - .write(item.tree_hash_root().as_slice()) - .expect("ssz_types vec should not contain more elements than max"); - } - - hasher - .finish() - .expect("ssz_types vec should not have a remaining buffer") - } - } -} - -#[cfg(test)] -mod test { - use super::*; - use ssz::*; - use std::fmt::Debug; - - #[test] - fn new() { - let vec = vec![42; 5]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_err()); - - let vec = vec![42; 3]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_ok()); - - let vec = vec![42; 4]; - let fixed: Result, _> = RuntimeVariableList::new(vec, 4); - assert!(fixed.is_ok()); - } - - #[test] - fn indexing() { - let vec = vec![1, 2]; - - let mut fixed: RuntimeVariableList = - RuntimeVariableList::new(vec.clone(), 8192).unwrap(); - - assert_eq!(fixed[0], 1); - assert_eq!(&fixed[0..1], &vec[0..1]); - assert_eq!(fixed[..].len(), 2); - - fixed[1] = 3; - assert_eq!(fixed[1], 3); - } - - #[test] - fn length() { - // Too long. 
- let vec = vec![42; 5]; - let err = RuntimeVariableList::::new(vec.clone(), 4).unwrap_err(); - assert_eq!(err, Error::OutOfBounds { i: 5, len: 4 }); - - let vec = vec![42; 3]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec.clone(), 4).unwrap(); - assert_eq!(&fixed[0..3], &vec[..]); - assert_eq!(&fixed[..], &vec![42, 42, 42][..]); - - let vec = vec![]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - assert_eq!(&fixed[..], &[] as &[u64]); - } - - #[test] - fn deref() { - let vec = vec![0, 2, 4, 6]; - let fixed: RuntimeVariableList = RuntimeVariableList::new(vec, 4).unwrap(); - - assert_eq!(fixed.first(), Some(&0)); - assert_eq!(fixed.get(3), Some(&6)); - assert_eq!(fixed.get(4), None); - } - - #[test] - fn encode() { - let vec: RuntimeVariableList = RuntimeVariableList::new(vec![0; 2], 2).unwrap(); - assert_eq!(vec.as_ssz_bytes(), vec![0, 0, 0, 0]); - assert_eq!( as Encode>::ssz_fixed_len(), 4); - } - - fn round_trip(item: RuntimeVariableList) { - let max_len = item.max_len(); - let encoded = &item.as_ssz_bytes(); - assert_eq!(item.ssz_bytes_len(), encoded.len()); - assert_eq!( - RuntimeVariableList::from_ssz_bytes(encoded, max_len), - Ok(item) - ); - } - - #[test] - fn u16_len_8() { - round_trip::(RuntimeVariableList::new(vec![42; 8], 8).unwrap()); - round_trip::(RuntimeVariableList::new(vec![0; 8], 8).unwrap()); - } -} diff --git a/consensus/types/src/attester_slashing.rs b/consensus/types/src/slashing/attester_slashing.rs similarity index 93% rename from consensus/types/src/attester_slashing.rs rename to consensus/types/src/slashing/attester_slashing.rs index adc3695f4a4..5c214b35f74 100644 --- a/consensus/types/src/attester_slashing.rs +++ b/consensus/types/src/slashing/attester_slashing.rs @@ -1,10 +1,5 @@ -use crate::context_deserialize; -use crate::indexed_attestation::{ - IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef, -}; -use crate::{ContextDeserialize, ForkName}; -use 
crate::{EthSpec, test_utils::TestRandom}; -use derivative::Derivative; +use context_deserialize::{ContextDeserialize, context_deserialize}; +use educe::Educe; use rand::{Rng, RngCore}; use serde::{Deserialize, Deserializer, Serialize}; use ssz_derive::{Decode, Encode}; @@ -12,11 +7,18 @@ use superstruct::superstruct; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::{IndexedAttestationBase, IndexedAttestationElectra, IndexedAttestationRef}, + core::EthSpec, + fork::ForkName, + test_utils::TestRandom, +}; + #[superstruct( variants(Base, Electra), variant_attributes( derive( - Derivative, + Educe, Debug, Clone, Serialize, @@ -27,7 +29,7 @@ use tree_hash_derive::TreeHash; TestRandom, ), context_deserialize(ForkName), - derivative(PartialEq, Eq, Hash(bound = "E: EthSpec")), + educe(PartialEq, Eq, Hash(bound(E: EthSpec))), serde(bound = "E: EthSpec"), cfg_attr( feature = "arbitrary", @@ -42,8 +44,8 @@ use tree_hash_derive::TreeHash; derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Encode, Deserialize, TreeHash, Educe)] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec", untagged)] #[ssz(enum_behaviour = "transparent")] #[tree_hash(enum_behaviour = "transparent")] @@ -57,8 +59,8 @@ pub struct AttesterSlashing { /// This is a copy of the `AttesterSlashing` enum but with `Encode` and `Decode` derived /// using the `union` behavior for the purposes of persistence on disk. We use a separate /// type so that we don't accidentally use this non-spec encoding in consensus objects. 
-#[derive(Debug, Clone, Encode, Decode, Derivative)] -#[derivative(PartialEq, Eq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Encode, Decode, Educe)] +#[educe(PartialEq, Eq, Hash(bound(E: EthSpec)))] #[ssz(enum_behaviour = "union")] pub enum AttesterSlashingOnDisk { Base(AttesterSlashingBase), diff --git a/consensus/types/src/slashing/mod.rs b/consensus/types/src/slashing/mod.rs new file mode 100644 index 00000000000..551b8e31377 --- /dev/null +++ b/consensus/types/src/slashing/mod.rs @@ -0,0 +1,8 @@ +mod attester_slashing; +mod proposer_slashing; + +pub use attester_slashing::{ + AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, AttesterSlashingOnDisk, + AttesterSlashingRef, AttesterSlashingRefOnDisk, +}; +pub use proposer_slashing::ProposerSlashing; diff --git a/consensus/types/src/proposer_slashing.rs b/consensus/types/src/slashing/proposer_slashing.rs similarity index 86% rename from consensus/types/src/proposer_slashing.rs rename to consensus/types/src/slashing/proposer_slashing.rs index f4d914c1e59..697bd1a9aa5 100644 --- a/consensus/types/src/proposer_slashing.rs +++ b/consensus/types/src/slashing/proposer_slashing.rs @@ -1,12 +1,11 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, SignedBeaconBlockHeader}; - +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{block::SignedBeaconBlockHeader, fork::ForkName, test_utils::TestRandom}; + /// Two conflicting proposals from the same proposer (validator). 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/activation_queue.rs b/consensus/types/src/state/activation_queue.rs similarity index 95% rename from consensus/types/src/activation_queue.rs rename to consensus/types/src/state/activation_queue.rs index dd3ce5f88cb..0d920a20cf0 100644 --- a/consensus/types/src/activation_queue.rs +++ b/consensus/types/src/state/activation_queue.rs @@ -1,6 +1,10 @@ -use crate::{ChainSpec, Epoch, Validator}; use std::collections::BTreeSet; +use crate::{ + core::{ChainSpec, Epoch}, + validator::Validator, +}; + /// Activation queue computed during epoch processing for use in the *next* epoch. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, PartialEq, Eq, Default, Clone)] diff --git a/consensus/types/src/beacon_state/balance.rs b/consensus/types/src/state/balance.rs similarity index 100% rename from consensus/types/src/beacon_state/balance.rs rename to consensus/types/src/state/balance.rs diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/state/beacon_state.rs similarity index 86% rename from consensus/types/src/beacon_state.rs rename to consensus/types/src/state/beacon_state.rs index 9c4e50dc613..c1b6f0dc0c1 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/state/beacon_state.rs @@ -1,50 +1,57 @@ -use self::committee_cache::get_active_validator_indices; -use crate::ContextDeserialize; -use crate::FixedBytesExtended; -use crate::historical_summary::HistoricalSummary; -use crate::test_utils::TestRandom; -use crate::*; +use std::{fmt, hash::Hash, mem, sync::Arc}; + +use bls::{AggregatePublicKey, PublicKeyBytes, Signature}; use compare_fields::CompareFields; -use compare_fields_derive::CompareFields; -use derivative::Derivative; +use context_deserialize::ContextDeserialize; +use educe::Educe; use ethereum_hashing::hash; +use fixed_bytes::FixedBytesExtended; use int_to_bytes::{int_to_bytes4, int_to_bytes8}; use metastruct::{NumFields, metastruct}; -pub use 
pubkey_cache::PubkeyCache; +use milhouse::{List, Vector}; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize}; use ssz::{Decode, DecodeError, Encode, ssz_encode}; use ssz_derive::{Decode, Encode}; -use std::hash::Hash; -use std::{fmt, mem, sync::Arc}; +use ssz_types::{BitVector, FixedVector}; use superstruct::superstruct; use swap_or_not_shuffle::compute_shuffled_index; use test_random_derive::TestRandom; +use tracing::instrument; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use typenum::Unsigned; -pub use self::committee_cache::{ - CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, - epoch_committee_count, +use crate::{ + BuilderPendingPayment, BuilderPendingWithdrawal, ExecutionBlockHash, ExecutionPayloadBid, + attestation::{ + AttestationDuty, BeaconCommittee, Checkpoint, CommitteeIndex, ParticipationFlags, + PendingAttestation, + }, + block::{BeaconBlock, BeaconBlockHeader, SignedBeaconBlockHash}, + consolidation::PendingConsolidation, + core::{ChainSpec, Domain, Epoch, EthSpec, Hash256, RelativeEpoch, RelativeEpochError, Slot}, + deposit::PendingDeposit, + execution::{ + Eth1Data, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, + }, + fork::{Fork, ForkName, ForkVersionDecode, InconsistentFork, map_fork_name}, + light_client::consts::{ + CURRENT_SYNC_COMMITTEE_INDEX, CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA, FINALIZED_ROOT_INDEX, + FINALIZED_ROOT_INDEX_ELECTRA, NEXT_SYNC_COMMITTEE_INDEX, NEXT_SYNC_COMMITTEE_INDEX_ELECTRA, + }, + state::{ + BlockRootsIter, CommitteeCache, EpochCache, EpochCacheError, ExitCache, HistoricalBatch, + HistoricalSummary, ProgressiveBalancesCache, PubkeyCache, SlashingsCache, + get_active_validator_indices, + }, + sync_committee::{SyncCommittee, SyncDuty}, + test_utils::TestRandom, + 
validator::Validator, + withdrawal::PendingPartialWithdrawal, }; -pub use crate::beacon_state::balance::Balance; -pub use crate::beacon_state::exit_cache::ExitCache; -pub use crate::beacon_state::progressive_balances_cache::*; -pub use crate::beacon_state::slashings_cache::SlashingsCache; -pub use eth_spec::*; -pub use iter::BlockRootsIter; -pub use milhouse::{List, Vector, interface::Interface}; -use tracing::instrument; - -#[macro_use] -mod committee_cache; -mod balance; -mod exit_cache; -mod iter; -mod progressive_balances_cache; -mod pubkey_cache; -mod slashings_cache; -mod tests; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; @@ -54,7 +61,7 @@ pub type Validators = List::ValidatorRegistryLimit> pub type Balances = List::ValidatorRegistryLimit>; #[derive(Debug, PartialEq, Clone)] -pub enum Error { +pub enum BeaconStateError { /// A state for a different hard-fork was required -- a severe logic error. IncorrectStateVariant, EpochOutOfBounds, @@ -198,7 +205,7 @@ enum AllowNextEpoch { } impl AllowNextEpoch { - fn upper_bound_of(self, current_epoch: Epoch) -> Result { + fn upper_bound_of(self, current_epoch: Epoch) -> Result { match self { AllowNextEpoch::True => Ok(current_epoch.safe_add(1)?), AllowNextEpoch::False => Ok(current_epoch), @@ -246,7 +253,7 @@ impl From for Hash256 { variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas), variant_attributes( derive( - Derivative, + Educe, Debug, PartialEq, Serialize, @@ -263,7 +270,7 @@ impl From for Hash256 { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") ), - derivative(Clone), + educe(Clone), ), specific_variant_attributes( Base(metastruct( @@ -379,8 +386,14 @@ impl From for Hash256 { num_fields(all()), )) ), - cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant"), + cast_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), + 
partial_getter_error( + ty = "BeaconStateError", + expr = "BeaconStateError::IncorrectStateVariant" + ), map_ref_mut_into(BeaconStateRef) )] #[cfg_attr( @@ -530,14 +543,9 @@ where )] #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderFulu, - #[superstruct( - only(Gloas), - partial_getter(rename = "latest_execution_payload_header_gloas") - )] + #[superstruct(only(Gloas))] #[metastruct(exclude_from(tree_lists))] - pub latest_execution_payload_header: ExecutionPayloadHeaderGloas, - - // Capella + pub latest_execution_payload_bid: ExecutionPayloadBid, #[superstruct(only(Capella, Deneb, Electra, Fulu, Gloas), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] #[metastruct(exclude_from(tree_lists))] @@ -596,6 +604,31 @@ where pub proposer_lookahead: Vector, // Gloas + #[test_random(default)] + #[superstruct(only(Gloas))] + #[metastruct(exclude_from(tree_lists))] + pub execution_payload_availability: BitVector, + + #[compare_fields(as_iter)] + #[test_random(default)] + #[superstruct(only(Gloas))] + pub builder_pending_payments: Vector, + + #[compare_fields(as_iter)] + #[test_random(default)] + #[superstruct(only(Gloas))] + pub builder_pending_withdrawals: + List, + + #[test_random(default)] + #[superstruct(only(Gloas))] + #[metastruct(exclude_from(tree_lists))] + pub latest_block_hash: ExecutionBlockHash, + + #[test_random(default)] + #[superstruct(only(Gloas))] + #[metastruct(exclude_from(tree_lists))] + pub latest_withdrawals_root: Hash256, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] @@ -741,11 +774,11 @@ impl BeaconState { } /// Returns the `tree_hash_root` of the state. 
- pub fn canonical_root(&mut self) -> Result { + pub fn canonical_root(&mut self) -> Result { self.update_tree_hash_cache() } - pub fn historical_batch(&mut self) -> Result, Error> { + pub fn historical_batch(&mut self) -> Result, BeaconStateError> { // Updating before cloning makes the clone cheap and saves repeated hashing. self.block_roots_mut().apply_updates()?; self.state_roots_mut().apply_updates()?; @@ -759,7 +792,10 @@ impl BeaconState { /// This method ensures the state's pubkey cache is fully up-to-date before checking if the validator /// exists in the registry. If a validator pubkey exists in the validator registry, returns `Some(i)`, /// otherwise returns `None`. - pub fn get_validator_index(&mut self, pubkey: &PublicKeyBytes) -> Result, Error> { + pub fn get_validator_index( + &mut self, + pubkey: &PublicKeyBytes, + ) -> Result, BeaconStateError> { self.update_pubkey_cache()?; Ok(self.pubkey_cache().get(pubkey)) } @@ -784,7 +820,7 @@ impl BeaconState { /// The epoch following `self.current_epoch()`. /// /// Spec v0.12.1 - pub fn next_epoch(&self) -> Result { + pub fn next_epoch(&self) -> Result { Ok(self.current_epoch().safe_add(1)?) } @@ -793,7 +829,7 @@ impl BeaconState { /// Makes use of the committee cache and will fail if no cache exists for the slot's epoch. /// /// Spec v0.12.1 - pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { + pub fn get_committee_count_at_slot(&self, slot: Slot) -> Result { let cache = self.committee_cache_at_slot(slot)?; Ok(cache.committees_per_slot()) } @@ -801,7 +837,10 @@ impl BeaconState { /// Compute the number of committees in an entire epoch. 
/// /// Spec v0.12.1 - pub fn get_epoch_committee_count(&self, relative_epoch: RelativeEpoch) -> Result { + pub fn get_epoch_committee_count( + &self, + relative_epoch: RelativeEpoch, + ) -> Result { let cache = self.committee_cache(relative_epoch)?; Ok(cache.epoch_committee_count() as u64) } @@ -814,7 +853,7 @@ impl BeaconState { pub fn get_cached_active_validator_indices( &self, relative_epoch: RelativeEpoch, - ) -> Result<&[usize], Error> { + ) -> Result<&[usize], BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.active_validator_indices()) @@ -827,7 +866,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { if epoch >= self.compute_activation_exit_epoch(self.current_epoch(), spec)? { Err(BeaconStateError::EpochOutOfBounds) } else { @@ -840,7 +879,10 @@ impl BeaconState { /// Note: the indices are shuffled (i.e., not in ascending order). /// /// Returns an error if that epoch is not cached, or the cache is not initialized. - pub fn get_shuffling(&self, relative_epoch: RelativeEpoch) -> Result<&[usize], Error> { + pub fn get_shuffling( + &self, + relative_epoch: RelativeEpoch, + ) -> Result<&[usize], BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.shuffling()) @@ -855,14 +897,14 @@ impl BeaconState { &self, slot: Slot, index: CommitteeIndex, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; let cache = self.committee_cache(relative_epoch)?; cache .get_beacon_committee(slot, index) - .ok_or(Error::NoCommittee { slot, index }) + .ok_or(BeaconStateError::NoCommittee { slot, index }) } /// Get all of the Beacon committees at a given slot. 
@@ -873,7 +915,7 @@ impl BeaconState { pub fn get_beacon_committees_at_slot( &self, slot: Slot, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let cache = self.committee_cache_at_slot(slot)?; cache.get_beacon_committees_at_slot(slot) } @@ -886,7 +928,7 @@ impl BeaconState { pub fn get_beacon_committees_at_epoch( &self, relative_epoch: RelativeEpoch, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; cache.get_all_beacon_committees() } @@ -902,7 +944,7 @@ impl BeaconState { epoch: Epoch, block_root: Hash256, spec: &ChainSpec, - ) -> Result { + ) -> Result { let decision_slot = spec.proposer_shuffling_decision_slot::(epoch); if self.slot() <= decision_slot { Ok(block_root) @@ -918,7 +960,7 @@ impl BeaconState { &self, epoch: Epoch, head_block_root: Hash256, - ) -> Result { + ) -> Result { let decision_slot = epoch.saturating_sub(1u64).end_slot(E::slots_per_epoch()); if self.slot() <= decision_slot { Ok(head_block_root) @@ -938,11 +980,14 @@ impl BeaconState { &self, block_root: Hash256, spec: &ChainSpec, - ) -> Result { + ) -> Result { self.proposer_shuffling_decision_root_at_epoch(self.current_epoch(), block_root, spec) } - pub fn epoch_cache_decision_root(&self, block_root: Hash256) -> Result { + pub fn epoch_cache_decision_root( + &self, + block_root: Hash256, + ) -> Result { // Epoch cache decision root for the current epoch (N) is the block root at the end of epoch // N - 1. This is the same as the root that determines the next epoch attester shuffling. 
self.attester_shuffling_decision_root(block_root, RelativeEpoch::Next) @@ -959,7 +1004,7 @@ impl BeaconState { &self, block_root: Hash256, relative_epoch: RelativeEpoch, - ) -> Result { + ) -> Result { let decision_slot = self.attester_shuffling_decision_slot(relative_epoch); if self.slot() == decision_slot { Ok(block_root) @@ -986,9 +1031,9 @@ impl BeaconState { indices: &[usize], seed: &[u8], spec: &ChainSpec, - ) -> Result { + ) -> Result { if indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(BeaconStateError::InsufficientValidators); } let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); @@ -1006,10 +1051,10 @@ impl BeaconState { seed, spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let candidate_index = *indices .get(shuffled_index) - .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(shuffled_index))?; let random_value = self.shuffling_random_value(i, seed)?; let effective_balance = self.get_effective_balance(candidate_index)?; if effective_balance.safe_mul(max_random_value)? @@ -1028,11 +1073,11 @@ impl BeaconState { seed: &[u8], indices: &[usize], spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // Regardless of fork, we never support computing proposer indices for past epochs. let current_epoch = self.current_epoch(); if epoch < current_epoch { - return Err(Error::ComputeProposerIndicesPastEpoch { + return Err(BeaconStateError::ComputeProposerIndicesPastEpoch { current_epoch, request_epoch: epoch, }); @@ -1051,17 +1096,19 @@ impl BeaconState { if self.fork_name_unchecked().fulu_enabled() && epoch < current_epoch.safe_add(spec.min_seed_lookahead)? 
{ - return Err(Error::ComputeProposerIndicesInsufficientLookahead { - current_epoch, - request_epoch: epoch, - }); + return Err( + BeaconStateError::ComputeProposerIndicesInsufficientLookahead { + current_epoch, + request_epoch: epoch, + }, + ); } } else { // Pre-Fulu the situation is reversed, we *should not* compute proposer indices using // too much lookahead. To do so would make us vulnerable to changes in the proposer // indices caused by effective balance changes. if epoch >= current_epoch.safe_add(spec.min_seed_lookahead)? { - return Err(Error::ComputeProposerIndicesExcessiveLookahead { + return Err(BeaconStateError::ComputeProposerIndicesExcessiveLookahead { current_epoch, request_epoch: epoch, }); @@ -1084,7 +1131,7 @@ impl BeaconState { /// In Electra and later, the random value is a 16-bit integer stored in a `u64`. /// /// Prior to Electra, the random value is an 8-bit integer stored in a `u64`. - fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { + fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { if self.fork_name_unchecked().electra_enabled() { Self::shuffling_random_u16_electra(i, seed).map(u64::from) } else { @@ -1095,37 +1142,39 @@ impl BeaconState { /// Get a random byte from the given `seed`. /// /// Used by the proposer & sync committee selection functions. - fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { + fn shuffling_random_byte(i: usize, seed: &[u8]) -> Result { let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8(i.safe_div(32)? as u64)); let index = i.safe_rem(32)?; hash(&preimage) .get(index) .copied() - .ok_or(Error::ShuffleIndexOutOfBounds(index)) + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(index)) } /// Get two random bytes from the given `seed`. /// /// This is used in place of `shuffling_random_byte` from Electra onwards. 
- fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { + fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { let mut preimage = seed.to_vec(); preimage.append(&mut int_to_bytes8(i.safe_div(16)? as u64)); let offset = i.safe_rem(16)?.safe_mul(2)?; hash(&preimage) .get(offset..offset.safe_add(2)?) - .ok_or(Error::ShuffleIndexOutOfBounds(offset))? + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(offset))? .try_into() .map(u16::from_le_bytes) - .map_err(|_| Error::ShuffleIndexOutOfBounds(offset)) + .map_err(|_| BeaconStateError::ShuffleIndexOutOfBounds(offset)) } /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. pub fn latest_execution_payload_header( &self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) | BeaconState::Altair(_) => { + Err(BeaconStateError::IncorrectStateVariant) + } BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRef::Bellatrix( &state.latest_execution_payload_header, )), @@ -1141,17 +1190,18 @@ impl BeaconState { BeaconState::Fulu(state) => Ok(ExecutionPayloadHeaderRef::Fulu( &state.latest_execution_payload_header, )), - BeaconState::Gloas(state) => Ok(ExecutionPayloadHeaderRef::Gloas( - &state.latest_execution_payload_header, - )), + // TODO(EIP-7732): investigate calling functions + BeaconState::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), } } pub fn latest_execution_payload_header_mut( &mut self, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { match self { - BeaconState::Base(_) | BeaconState::Altair(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) | BeaconState::Altair(_) => { + Err(BeaconStateError::IncorrectStateVariant) + } BeaconState::Bellatrix(state) => Ok(ExecutionPayloadHeaderRefMut::Bellatrix( &mut state.latest_execution_payload_header, )), @@ -1167,9 +1217,8 @@ impl BeaconState { 
BeaconState::Fulu(state) => Ok(ExecutionPayloadHeaderRefMut::Fulu( &mut state.latest_execution_payload_header, )), - BeaconState::Gloas(state) => Ok(ExecutionPayloadHeaderRefMut::Gloas( - &mut state.latest_execution_payload_header, - )), + // TODO(EIP-7732): investigate calling functions + BeaconState::Gloas(_) => Err(BeaconStateError::IncorrectStateVariant), } } @@ -1182,7 +1231,7 @@ impl BeaconState { index: CommitteeIndex, slot_signature: &Signature, spec: &ChainSpec, - ) -> Result { + ) -> Result { let committee = self.get_beacon_committee(slot, index)?; let modulo = std::cmp::max( 1, @@ -1193,7 +1242,7 @@ impl BeaconState { signature_hash .get(0..8) .and_then(|bytes| bytes.try_into().ok()) - .ok_or(Error::IsAggregatorOutOfBounds)?, + .ok_or(BeaconStateError::IsAggregatorOutOfBounds)?, ); Ok(signature_hash_int.safe_rem(modulo)? == 0) @@ -1202,13 +1251,17 @@ impl BeaconState { /// Returns the beacon proposer index for the `slot` in `self.current_epoch()`. /// /// Spec v1.6.0-alpha.1 - pub fn get_beacon_proposer_index(&self, slot: Slot, spec: &ChainSpec) -> Result { + pub fn get_beacon_proposer_index( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result { // Proposer indices are only known for the current epoch, due to the dependence on the // effective balances of validators, which change at every epoch transition. let epoch = slot.epoch(E::slots_per_epoch()); // TODO(EIP-7917): Explore allowing this function to be called with a slot one epoch in the future. 
if epoch != self.current_epoch() { - return Err(Error::SlotOutOfBounds); + return Err(BeaconStateError::SlotOutOfBounds); } if let Ok(proposer_lookahead) = self.proposer_lookahead() { @@ -1216,7 +1269,7 @@ impl BeaconState { let index = slot.as_usize().safe_rem(E::slots_per_epoch() as usize)?; proposer_lookahead .get(index) - .ok_or(Error::ProposerLookaheadOutOfBounds { i: index }) + .ok_or(BeaconStateError::ProposerLookaheadOutOfBounds { i: index }) .map(|index| *index as usize) } else { // Pre-Fulu @@ -1234,7 +1287,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // This isn't in the spec, but we remove the footgun that is requesting the current epoch // for a Fulu state. if let Ok(proposer_lookahead) = self.proposer_lookahead() @@ -1264,7 +1317,11 @@ impl BeaconState { /// Compute the seed to use for the beacon proposer selection at the given `slot`. /// /// Spec v0.12.1 - pub fn get_beacon_proposer_seed(&self, slot: Slot, spec: &ChainSpec) -> Result, Error> { + pub fn get_beacon_proposer_seed( + &self, + slot: Slot, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let mut preimage = self .get_seed(epoch, Domain::BeaconProposer, spec)? 
@@ -1279,7 +1336,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result<&Arc>, Error> { + ) -> Result<&Arc>, BeaconStateError> { let sync_committee_period = epoch.sync_committee_period(spec)?; let current_sync_committee_period = self.current_epoch().sync_committee_period(spec)?; let next_sync_committee_period = current_sync_committee_period.safe_add(1)?; @@ -1289,7 +1346,7 @@ impl BeaconState { } else if sync_committee_period == next_sync_committee_period { self.next_sync_committee() } else { - Err(Error::SyncCommitteeNotKnown { + Err(BeaconStateError::SyncCommitteeNotKnown { current_epoch: self.current_epoch(), epoch, }) @@ -1300,7 +1357,7 @@ impl BeaconState { pub fn get_sync_committee_indices( &mut self, sync_committee: &SyncCommittee, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { self.update_pubkey_cache()?; sync_committee .pubkeys @@ -1308,13 +1365,16 @@ impl BeaconState { .map(|pubkey| { self.pubkey_cache() .get(pubkey) - .ok_or(Error::PubkeyCacheInconsistent) + .ok_or(BeaconStateError::PubkeyCacheInconsistent) }) .collect() } /// Compute the sync committee indices for the next sync committee. 
- fn get_next_sync_committee_indices(&self, spec: &ChainSpec) -> Result, Error> { + fn get_next_sync_committee_indices( + &self, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let epoch = self.current_epoch().safe_add(1)?; let active_validator_indices = self.get_active_validator_indices(epoch, spec)?; @@ -1337,10 +1397,10 @@ impl BeaconState { seed.as_slice(), spec.shuffle_round_count, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let candidate_index = *active_validator_indices .get(shuffled_index) - .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(shuffled_index))?; let random_value = self.shuffling_random_value(i, seed.as_slice())?; let effective_balance = self.get_validator(candidate_index)?.effective_balance; if effective_balance.safe_mul(max_random_value)? @@ -1354,7 +1414,10 @@ impl BeaconState { } /// Compute the next sync committee. - pub fn get_next_sync_committee(&self, spec: &ChainSpec) -> Result, Error> { + pub fn get_next_sync_committee( + &self, + spec: &ChainSpec, + ) -> Result, BeaconStateError> { let sync_committee_indices = self.get_next_sync_committee_indices(spec)?; let pubkeys = sync_committee_indices @@ -1363,7 +1426,7 @@ impl BeaconState { self.validators() .get(index) .map(|v| v.pubkey) - .ok_or(Error::UnknownValidator(index)) + .ok_or(BeaconStateError::UnknownValidator(index)) }) .collect::, _>>()?; let decompressed_pubkeys = pubkeys @@ -1387,7 +1450,7 @@ impl BeaconState { epoch: Epoch, validator_indices: &[u64], spec: &ChainSpec, - ) -> Result, Error>>, Error> { + ) -> Result, BeaconStateError>>, BeaconStateError> { let sync_committee = self.get_built_sync_committee(epoch, spec)?; Ok(validator_indices @@ -1422,7 +1485,7 @@ impl BeaconState { /// Safely obtains the index for latest block roots, given some `slot`. 
/// /// Spec v0.12.1 - fn get_latest_block_roots_index(&self, slot: Slot) -> Result { + fn get_latest_block_roots_index(&self, slot: Slot) -> Result { if slot < self.slot() && self.slot() <= slot.safe_add(self.block_roots().len() as u64)? { Ok(slot.as_usize().safe_rem(self.block_roots().len())?) } else { @@ -1442,7 +1505,7 @@ impl BeaconState { let i = self.get_latest_block_roots_index(slot)?; self.block_roots() .get(i) - .ok_or(Error::BlockRootsOutOfBounds(i)) + .ok_or(BeaconStateError::BlockRootsOutOfBounds(i)) } /// Return the block root at a recent `epoch`. @@ -1462,12 +1525,12 @@ impl BeaconState { *self .block_roots_mut() .get_mut(i) - .ok_or(Error::BlockRootsOutOfBounds(i))? = block_root; + .ok_or(BeaconStateError::BlockRootsOutOfBounds(i))? = block_root; Ok(()) } /// Fill `randao_mixes` with - pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), Error> { + pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), BeaconStateError> { *self.randao_mixes_mut() = Vector::from_elem(index_root)?; Ok(()) } @@ -1479,7 +1542,7 @@ impl BeaconState { &self, epoch: Epoch, allow_next_epoch: AllowNextEpoch, - ) -> Result { + ) -> Result { let current_epoch = self.current_epoch(); let len = E::EpochsPerHistoricalVector::to_u64(); @@ -1488,7 +1551,7 @@ impl BeaconState { { Ok(epoch.as_usize().safe_rem(len as usize)?) } else { - Err(Error::EpochOutOfBounds) + Err(BeaconStateError::EpochOutOfBounds) } } @@ -1504,7 +1567,11 @@ impl BeaconState { /// # Errors: /// /// See `Self::get_randao_mix`. - pub fn update_randao_mix(&mut self, epoch: Epoch, signature: &Signature) -> Result<(), Error> { + pub fn update_randao_mix( + &mut self, + epoch: Epoch, + signature: &Signature, + ) -> Result<(), BeaconStateError> { let i = epoch .as_usize() .safe_rem(E::EpochsPerHistoricalVector::to_usize())?; @@ -1514,36 +1581,36 @@ impl BeaconState { *self .randao_mixes_mut() .get_mut(i) - .ok_or(Error::RandaoMixesOutOfBounds(i))? 
= + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i))? = *self.get_randao_mix(epoch)? ^ signature_hash; Ok(()) } /// Return the randao mix at a recent ``epoch``. - pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, Error> { + pub fn get_randao_mix(&self, epoch: Epoch) -> Result<&Hash256, BeaconStateError> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::False)?; self.randao_mixes() .get(i) - .ok_or(Error::RandaoMixesOutOfBounds(i)) + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i)) } /// Set the randao mix at a recent ``epoch``. /// /// Spec v0.12.1 - pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), Error> { + pub fn set_randao_mix(&mut self, epoch: Epoch, mix: Hash256) -> Result<(), BeaconStateError> { let i = self.get_randao_mix_index(epoch, AllowNextEpoch::True)?; *self .randao_mixes_mut() .get_mut(i) - .ok_or(Error::RandaoMixesOutOfBounds(i))? = mix; + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i))? = mix; Ok(()) } /// Safely obtains the index for latest state roots, given some `slot`. /// /// Spec v0.12.1 - fn get_latest_state_roots_index(&self, slot: Slot) -> Result { + fn get_latest_state_roots_index(&self, slot: Slot) -> Result { if slot < self.slot() && self.slot() <= slot.safe_add(self.state_roots().len() as u64)? { Ok(slot.as_usize().safe_rem(self.state_roots().len())?) } else { @@ -1552,38 +1619,42 @@ impl BeaconState { } /// Gets the state root for some slot. - pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, Error> { + pub fn get_state_root(&self, slot: Slot) -> Result<&Hash256, BeaconStateError> { let i = self.get_latest_state_roots_index(slot)?; self.state_roots() .get(i) - .ok_or(Error::StateRootsOutOfBounds(i)) + .ok_or(BeaconStateError::StateRootsOutOfBounds(i)) } /// Gets the state root for the start slot of some epoch. 
- pub fn get_state_root_at_epoch_start(&self, epoch: Epoch) -> Result { + pub fn get_state_root_at_epoch_start(&self, epoch: Epoch) -> Result { self.get_state_root(epoch.start_slot(E::slots_per_epoch())) .copied() } /// Gets the oldest (earliest slot) state root. - pub fn get_oldest_state_root(&self) -> Result<&Hash256, Error> { + pub fn get_oldest_state_root(&self) -> Result<&Hash256, BeaconStateError> { let oldest_slot = self.slot().saturating_sub(self.state_roots().len()); self.get_state_root(oldest_slot) } /// Gets the oldest (earliest slot) block root. - pub fn get_oldest_block_root(&self) -> Result<&Hash256, Error> { + pub fn get_oldest_block_root(&self) -> Result<&Hash256, BeaconStateError> { let oldest_slot = self.slot().saturating_sub(self.block_roots().len()); self.get_block_root(oldest_slot) } /// Sets the latest state root for slot. - pub fn set_state_root(&mut self, slot: Slot, state_root: Hash256) -> Result<(), Error> { + pub fn set_state_root( + &mut self, + slot: Slot, + state_root: Hash256, + ) -> Result<(), BeaconStateError> { let i = self.get_latest_state_roots_index(slot)?; *self .state_roots_mut() .get_mut(i) - .ok_or(Error::StateRootsOutOfBounds(i))? = state_root; + .ok_or(BeaconStateError::StateRootsOutOfBounds(i))? = state_root; Ok(()) } @@ -1592,7 +1663,7 @@ impl BeaconState { &self, epoch: Epoch, allow_next_epoch: AllowNextEpoch, - ) -> Result { + ) -> Result { // We allow the slashings vector to be accessed at any cached epoch at or before // the current epoch, or the next epoch if `AllowNextEpoch::True` is passed. let current_epoch = self.current_epoch(); @@ -1603,7 +1674,7 @@ impl BeaconState { .as_usize() .safe_rem(E::EpochsPerSlashingsVector::to_usize())?) } else { - Err(Error::EpochOutOfBounds) + Err(BeaconStateError::EpochOutOfBounds) } } @@ -1613,21 +1684,21 @@ impl BeaconState { } /// Get the total slashed balances for some epoch. 
- pub fn get_slashings(&self, epoch: Epoch) -> Result { + pub fn get_slashings(&self, epoch: Epoch) -> Result { let i = self.get_slashings_index(epoch, AllowNextEpoch::False)?; self.slashings() .get(i) .copied() - .ok_or(Error::SlashingsOutOfBounds(i)) + .ok_or(BeaconStateError::SlashingsOutOfBounds(i)) } /// Set the total slashed balances for some epoch. - pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), Error> { + pub fn set_slashings(&mut self, epoch: Epoch, value: u64) -> Result<(), BeaconStateError> { let i = self.get_slashings_index(epoch, AllowNextEpoch::True)?; *self .slashings_mut() .get_mut(i) - .ok_or(Error::SlashingsOutOfBounds(i))? = value; + .ok_or(BeaconStateError::SlashingsOutOfBounds(i))? = value; Ok(()) } @@ -1667,10 +1738,10 @@ impl BeaconState { &mut ExitCache, &mut EpochCache, ), - Error, + BeaconStateError, > { match self { - BeaconState::Base(_) => Err(Error::IncorrectStateVariant), + BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Altair(state) => Ok(( &mut state.validators, &mut state.balances, @@ -1745,18 +1816,21 @@ impl BeaconState { } /// Get the balance of a single validator. - pub fn get_balance(&self, validator_index: usize) -> Result { + pub fn get_balance(&self, validator_index: usize) -> Result { self.balances() .get(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) + .ok_or(BeaconStateError::BalancesOutOfBounds(validator_index)) .copied() } /// Get a mutable reference to the balance of a single validator. - pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + pub fn get_balance_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut u64, BeaconStateError> { self.balances_mut() .get_mut(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) + .ok_or(BeaconStateError::BalancesOutOfBounds(validator_index)) } /// Generate a seed for the given `epoch`. 
@@ -1765,7 +1839,7 @@ impl BeaconState { epoch: Epoch, domain_type: Domain, spec: &ChainSpec, - ) -> Result { + ) -> Result { // Bypass the safe getter for RANDAO so we can gracefully handle the scenario where `epoch // == 0`. let mix = { @@ -1776,7 +1850,7 @@ impl BeaconState { let i_mod = i.as_usize().safe_rem(self.randao_mixes().len())?; self.randao_mixes() .get(i_mod) - .ok_or(Error::RandaoMixesOutOfBounds(i_mod))? + .ok_or(BeaconStateError::RandaoMixesOutOfBounds(i_mod))? }; let domain_bytes = int_to_bytes4(spec.get_domain_constant(domain_type)); let epoch_bytes = int_to_bytes8(epoch.as_u64()); @@ -1795,17 +1869,20 @@ impl BeaconState { } /// Safe indexer for the `validators` list. - pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, Error> { + pub fn get_validator(&self, validator_index: usize) -> Result<&Validator, BeaconStateError> { self.validators() .get(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Safe mutator for the `validators` list. - pub fn get_validator_mut(&mut self, validator_index: usize) -> Result<&mut Validator, Error> { + pub fn get_validator_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut Validator, BeaconStateError> { self.validators_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Add a validator to the registry and return the validator index that was allocated for it. 
@@ -1815,7 +1892,7 @@ impl BeaconState { withdrawal_credentials: Hash256, amount: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let index = self.validators().len(); let fork_name = self.fork_name_unchecked(); self.validators_mut().push(Validator::from_deposit( @@ -1847,7 +1924,7 @@ impl BeaconState { if pubkey_cache.len() == index { let success = pubkey_cache.insert(pubkey, index); if !success { - return Err(Error::PubkeyCacheInconsistent); + return Err(BeaconStateError::PubkeyCacheInconsistent); } } @@ -1858,14 +1935,14 @@ impl BeaconState { pub fn get_validator_cow( &mut self, validator_index: usize, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { self.validators_mut() .get_cow(validator_index) - .ok_or(Error::UnknownValidator(validator_index)) + .ok_or(BeaconStateError::UnknownValidator(validator_index)) } /// Return the effective balance for a validator with the given `validator_index`. - pub fn get_effective_balance(&self, validator_index: usize) -> Result { + pub fn get_effective_balance(&self, validator_index: usize) -> Result { self.get_validator(validator_index) .map(|v| v.effective_balance) } @@ -1873,20 +1950,27 @@ impl BeaconState { /// Get the inactivity score for a single validator. /// /// Will error if the state lacks an `inactivity_scores` field. - pub fn get_inactivity_score(&self, validator_index: usize) -> Result { + pub fn get_inactivity_score(&self, validator_index: usize) -> Result { self.inactivity_scores()? .get(validator_index) .copied() - .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + .ok_or(BeaconStateError::InactivityScoresOutOfBounds( + validator_index, + )) } /// Get a mutable reference to the inactivity score for a single validator. /// /// Will error if the state lacks an `inactivity_scores` field. 
- pub fn get_inactivity_score_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + pub fn get_inactivity_score_mut( + &mut self, + validator_index: usize, + ) -> Result<&mut u64, BeaconStateError> { self.inactivity_scores_mut()? .get_mut(validator_index) - .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) + .ok_or(BeaconStateError::InactivityScoresOutOfBounds( + validator_index, + )) } /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. @@ -1896,14 +1980,14 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result { + ) -> Result { Ok(spec.compute_activation_exit_epoch(epoch)?) } /// Return the churn limit for the current epoch (number of validators who can leave per epoch). /// /// Uses the current epoch committee cache, and will error if it isn't initialized. - pub fn get_validator_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_validator_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(std::cmp::max( spec.min_per_epoch_churn_limit, (self @@ -1916,7 +2000,7 @@ impl BeaconState { /// Return the activation churn limit for the current epoch (number of validators who can enter per epoch). /// /// Uses the current epoch committee cache, and will error if it isn't initialized. - pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_activation_churn_limit(&self, spec: &ChainSpec) -> Result { Ok(match self { BeaconState::Base(_) | BeaconState::Altair(_) @@ -1942,7 +2026,7 @@ impl BeaconState { &self, validator_index: usize, relative_epoch: RelativeEpoch, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { let cache = self.committee_cache(relative_epoch)?; Ok(cache.get_attestation_duties(validator_index)) @@ -1952,7 +2036,10 @@ impl BeaconState { /// /// This method should rarely be invoked because single-pass epoch processing keeps the total /// active balance cache up to date. 
- pub fn compute_total_active_balance_slow(&self, spec: &ChainSpec) -> Result { + pub fn compute_total_active_balance_slow( + &self, + spec: &ChainSpec, + ) -> Result { let current_epoch = self.current_epoch(); let mut total_active_balance = 0; @@ -1974,20 +2061,20 @@ impl BeaconState { /// the current committee cache is. /// /// Returns minimum `EFFECTIVE_BALANCE_INCREMENT`, to avoid div by 0. - pub fn get_total_active_balance(&self) -> Result { + pub fn get_total_active_balance(&self) -> Result { self.get_total_active_balance_at_epoch(self.current_epoch()) } /// Get the cached total active balance while checking that it is for the correct `epoch`. - pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result { + pub fn get_total_active_balance_at_epoch(&self, epoch: Epoch) -> Result { let (initialized_epoch, balance) = self .total_active_balance() - .ok_or(Error::TotalActiveBalanceCacheUninitialized)?; + .ok_or(BeaconStateError::TotalActiveBalanceCacheUninitialized)?; if initialized_epoch == epoch { Ok(balance) } else { - Err(Error::TotalActiveBalanceCacheInconsistent { + Err(BeaconStateError::TotalActiveBalanceCacheInconsistent { initialized_epoch, current_epoch: epoch, }) @@ -2007,7 +2094,10 @@ impl BeaconState { /// Build the total active balance cache for the current epoch if it is not already built. 
#[instrument(skip_all, level = "debug")] - pub fn build_total_active_balance_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_total_active_balance_cache( + &mut self, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { if self .get_total_active_balance_at_epoch(self.current_epoch()) .is_err() @@ -2021,7 +2111,7 @@ impl BeaconState { pub fn force_build_total_active_balance_cache( &mut self, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let total_active_balance = self.compute_total_active_balance_slow(spec)?; *self.total_active_balance_mut() = Some((self.current_epoch(), total_active_balance)); Ok(()) @@ -2038,7 +2128,7 @@ impl BeaconState { epoch: Epoch, previous_epoch: Epoch, current_epoch: Epoch, - ) -> Result<&mut List, Error> { + ) -> Result<&mut List, BeaconStateError> { if epoch == current_epoch { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), @@ -2068,7 +2158,7 @@ impl BeaconState { /// Build all caches (except the tree hash cache), if they need to be built. #[instrument(skip_all, level = "debug")] - pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { self.build_all_committee_caches(spec)?; self.update_pubkey_cache()?; self.build_exit_cache(spec)?; @@ -2079,7 +2169,7 @@ impl BeaconState { /// Build all committee caches, if they need to be built. #[instrument(skip_all, level = "debug")] - pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_all_committee_caches(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { self.build_committee_cache(RelativeEpoch::Previous, spec)?; self.build_committee_cache(RelativeEpoch::Current, spec)?; self.build_committee_cache(RelativeEpoch::Next, spec)?; @@ -2088,7 +2178,7 @@ impl BeaconState { /// Build the exit cache, if it needs to be built. 
#[instrument(skip_all, level = "debug")] - pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn build_exit_cache(&mut self, spec: &ChainSpec) -> Result<(), BeaconStateError> { if self.exit_cache().check_initialized().is_err() { *self.exit_cache_mut() = ExitCache::new(self.validators(), spec)?; } @@ -2097,7 +2187,7 @@ impl BeaconState { /// Build the slashings cache if it needs to be built. #[instrument(skip_all, level = "debug")] - pub fn build_slashings_cache(&mut self) -> Result<(), Error> { + pub fn build_slashings_cache(&mut self) -> Result<(), BeaconStateError> { let latest_block_slot = self.latest_block_header().slot; if !self.slashings_cache().is_initialized(latest_block_slot) { *self.slashings_cache_mut() = SlashingsCache::new(latest_block_slot, self.validators()); @@ -2111,7 +2201,7 @@ impl BeaconState { } /// Drop all caches on the state. - pub fn drop_all_caches(&mut self) -> Result<(), Error> { + pub fn drop_all_caches(&mut self) -> Result<(), BeaconStateError> { self.drop_total_active_balance_cache(); self.drop_committee_cache(RelativeEpoch::Previous)?; self.drop_committee_cache(RelativeEpoch::Current)?; @@ -2139,7 +2229,7 @@ impl BeaconState { &mut self, relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let i = Self::committee_cache_index(relative_epoch); let is_initialized = self .committee_cache_at_index(i)? 
@@ -2160,7 +2250,7 @@ impl BeaconState { &mut self, relative_epoch: RelativeEpoch, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let epoch = relative_epoch.into_epoch(self.current_epoch()); let i = Self::committee_cache_index(relative_epoch); @@ -2176,7 +2266,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { CommitteeCache::initialized(self, epoch, spec) } @@ -2186,7 +2276,7 @@ impl BeaconState { /// /// Note: this function will not build any new committee caches, nor will it update the total /// active balance cache. The total active balance cache must be updated separately. - pub fn advance_caches(&mut self) -> Result<(), Error> { + pub fn advance_caches(&mut self) -> Result<(), BeaconStateError> { self.committee_caches_mut().rotate_left(1); let next = Self::committee_cache_index(RelativeEpoch::Next); @@ -2202,30 +2292,51 @@ impl BeaconState { } } + pub fn is_parent_block_full(&self) -> bool { + match self { + BeaconState::Base(_) | BeaconState::Altair(_) => false, + // TODO(EIP-7732): check the implications of this when we get to forkchoice modifications + BeaconState::Bellatrix(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) + | BeaconState::Electra(_) + | BeaconState::Fulu(_) => true, + BeaconState::Gloas(state) => { + state.latest_execution_payload_bid.block_hash == state.latest_block_hash + } + } + } + /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. - fn committee_cache_at_slot(&self, slot: Slot) -> Result<&Arc, Error> { + fn committee_cache_at_slot( + &self, + slot: Slot, + ) -> Result<&Arc, BeaconStateError> { let epoch = slot.epoch(E::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; self.committee_cache(relative_epoch) } /// Get the committee cache at a given index. 
- fn committee_cache_at_index(&self, index: usize) -> Result<&Arc, Error> { + fn committee_cache_at_index( + &self, + index: usize, + ) -> Result<&Arc, BeaconStateError> { self.committee_caches() .get(index) - .ok_or(Error::CommitteeCachesOutOfBounds(index)) + .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } /// Get a mutable reference to the committee cache at a given index. fn committee_cache_at_index_mut( &mut self, index: usize, - ) -> Result<&mut Arc, Error> { + ) -> Result<&mut Arc, BeaconStateError> { self.committee_caches_mut() .get_mut(index) - .ok_or(Error::CommitteeCachesOutOfBounds(index)) + .ok_or(BeaconStateError::CommitteeCachesOutOfBounds(index)) } /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been @@ -2233,19 +2344,24 @@ impl BeaconState { pub fn committee_cache( &self, relative_epoch: RelativeEpoch, - ) -> Result<&Arc, Error> { + ) -> Result<&Arc, BeaconStateError> { let i = Self::committee_cache_index(relative_epoch); let cache = self.committee_cache_at_index(i)?; if cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) { Ok(cache) } else { - Err(Error::CommitteeCacheUninitialized(Some(relative_epoch))) + Err(BeaconStateError::CommitteeCacheUninitialized(Some( + relative_epoch, + ))) } } /// Drops the cache, leaving it in an uninitialized state. - pub fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) -> Result<(), Error> { + pub fn drop_committee_cache( + &mut self, + relative_epoch: RelativeEpoch, + ) -> Result<(), BeaconStateError> { *self.committee_cache_at_index_mut(Self::committee_cache_index(relative_epoch))? = Arc::new(CommitteeCache::default()); Ok(()) @@ -2256,7 +2372,7 @@ impl BeaconState { /// Adds all `pubkeys` from the `validators` which are not already in the cache. Will /// never re-add a pubkey. 
#[instrument(skip_all, level = "debug")] - pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { + pub fn update_pubkey_cache(&mut self) -> Result<(), BeaconStateError> { let mut pubkey_cache = mem::take(self.pubkey_cache_mut()); let start_index = pubkey_cache.len(); @@ -2264,7 +2380,7 @@ impl BeaconState { let index = start_index.safe_add(i)?; let success = pubkey_cache.insert(validator.pubkey, index); if !success { - return Err(Error::PubkeyCacheInconsistent); + return Err(BeaconStateError::PubkeyCacheInconsistent); } } *self.pubkey_cache_mut() = pubkey_cache; @@ -2342,7 +2458,7 @@ impl BeaconState { /// /// Initialize the tree hash cache if it isn't already initialized. #[instrument(skip_all, level = "debug")] - pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { + pub fn update_tree_hash_cache<'a>(&'a mut self) -> Result { self.apply_pending_mutations()?; map_beacon_state_ref!(&'a _, self.to_ref(), |inner, cons| { let root = inner.tree_hash_root(); @@ -2354,7 +2470,7 @@ impl BeaconState { /// Compute the tree hash root of the validators using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. - pub fn update_validators_tree_hash_cache(&mut self) -> Result { + pub fn update_validators_tree_hash_cache(&mut self) -> Result { self.validators_mut().apply_updates()?; Ok(self.validators().tree_hash_root()) } @@ -2365,7 +2481,7 @@ impl BeaconState { &self, previous_epoch: Epoch, val: &Validator, - ) -> Result { + ) -> Result { Ok(val.is_active_at(previous_epoch) || (val.slashed && previous_epoch.safe_add(Epoch::new(1))? < val.withdrawable_epoch)) } @@ -2389,7 +2505,7 @@ impl BeaconState { pub fn get_sync_committee_for_next_slot( &self, spec: &ChainSpec, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { let next_slot_epoch = self .slot() .saturating_add(Slot::new(1)) @@ -2415,7 +2531,7 @@ impl BeaconState { // ******* Electra accessors ******* /// Return the churn limit for the current epoch. 
- pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_balance_churn_limit(&self, spec: &ChainSpec) -> Result { let total_active_balance = self.get_total_active_balance()?; let churn = std::cmp::max( spec.min_per_epoch_churn_limit_electra, @@ -2426,20 +2542,26 @@ impl BeaconState { } /// Return the churn limit for the current epoch dedicated to activations and exits. - pub fn get_activation_exit_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_activation_exit_churn_limit( + &self, + spec: &ChainSpec, + ) -> Result { Ok(std::cmp::min( spec.max_per_epoch_activation_exit_churn_limit, self.get_balance_churn_limit(spec)?, )) } - pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { + pub fn get_consolidation_churn_limit(&self, spec: &ChainSpec) -> Result { self.get_balance_churn_limit(spec)? .safe_sub(self.get_activation_exit_churn_limit(spec)?) .map_err(Into::into) } - pub fn get_pending_balance_to_withdraw(&self, validator_index: usize) -> Result { + pub fn get_pending_balance_to_withdraw( + &self, + validator_index: usize, + ) -> Result { let mut pending_balance = 0; for withdrawal in self .pending_partial_withdrawals()? 
@@ -2457,11 +2579,11 @@ impl BeaconState { &mut self, validator_index: usize, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let balance = self .balances_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; if *balance > spec.min_activation_balance { let excess_balance = balance.safe_sub(spec.min_activation_balance)?; *balance = spec.min_activation_balance; @@ -2482,11 +2604,11 @@ impl BeaconState { &mut self, validator_index: usize, spec: &ChainSpec, - ) -> Result<(), Error> { + ) -> Result<(), BeaconStateError> { let validator = self .validators_mut() .get_mut(validator_index) - .ok_or(Error::UnknownValidator(validator_index))?; + .ok_or(BeaconStateError::UnknownValidator(validator_index))?; AsMut::<[u8; 32]>::as_mut(&mut validator.withdrawal_credentials)[0] = spec.compounding_withdrawal_prefix_byte; @@ -2498,7 +2620,7 @@ impl BeaconState { &mut self, exit_balance: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let mut earliest_exit_epoch = std::cmp::max( self.earliest_exit_epoch()?, self.compute_activation_exit_epoch(self.current_epoch(), spec)?, @@ -2528,7 +2650,7 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) - | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + | BeaconState::Deneb(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Electra(_) | BeaconState::Fulu(_) | BeaconState::Gloas(_) => { // Consume the balance and update state variables *self.exit_balance_to_consume_mut()? 
= @@ -2543,7 +2665,7 @@ impl BeaconState { &mut self, consolidation_balance: u64, spec: &ChainSpec, - ) -> Result { + ) -> Result { let mut earliest_consolidation_epoch = std::cmp::max( self.earliest_consolidation_epoch()?, self.compute_activation_exit_epoch(self.current_epoch(), spec)?, @@ -2575,7 +2697,7 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) - | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + | BeaconState::Deneb(_) => Err(BeaconStateError::IncorrectStateVariant), BeaconState::Electra(_) | BeaconState::Fulu(_) | BeaconState::Gloas(_) => { // Consume the balance and update state variables. *self.consolidation_balance_to_consume_mut()? = @@ -2587,7 +2709,7 @@ impl BeaconState { } #[allow(clippy::arithmetic_side_effects)] - pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + pub fn rebase_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), BeaconStateError> { // Required for macros (which use type-hints internally). match (&mut *self, base) { @@ -2678,7 +2800,11 @@ impl BeaconState { Ok(()) } - pub fn rebase_caches_on(&mut self, base: &Self, spec: &ChainSpec) -> Result<(), Error> { + pub fn rebase_caches_on( + &mut self, + base: &Self, + spec: &ChainSpec, + ) -> Result<(), BeaconStateError> { // Use pubkey cache from `base` if it contains superior information (likely if our cache is // uninitialized). Be careful not to use a cache which has *more* validators than expected, // as other code expects `self.pubkey_cache().len() <= self.validators.len()`. 
@@ -2767,7 +2893,7 @@ impl BeaconState { } #[allow(clippy::arithmetic_side_effects)] - pub fn apply_pending_mutations(&mut self) -> Result<(), Error> { + pub fn apply_pending_mutations(&mut self) -> Result<(), BeaconStateError> { match self { Self::Base(inner) => { map_beacon_state_base_tree_list_fields!(inner, |_, x| { x.apply_updates() }) @@ -2797,43 +2923,43 @@ impl BeaconState { Ok(()) } - pub fn compute_current_sync_committee_proof(&self) -> Result, Error> { + pub fn compute_current_sync_committee_proof(&self) -> Result, BeaconStateError> { // Sync committees are top-level fields, subtract off the generalized indices // for the internal nodes. Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate let field_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA + CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA } else { - light_client_update::CURRENT_SYNC_COMMITTEE_INDEX + CURRENT_SYNC_COMMITTEE_INDEX }; let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } - pub fn compute_next_sync_committee_proof(&self) -> Result, Error> { + pub fn compute_next_sync_committee_proof(&self) -> Result, BeaconStateError> { // Sync committees are top-level fields, subtract off the generalized indices // for the internal nodes. 
Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate let field_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA + NEXT_SYNC_COMMITTEE_INDEX_ELECTRA } else { - light_client_update::NEXT_SYNC_COMMITTEE_INDEX + NEXT_SYNC_COMMITTEE_INDEX }; let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } - pub fn compute_finalized_root_proof(&self) -> Result, Error> { + pub fn compute_finalized_root_proof(&self) -> Result, BeaconStateError> { // Finalized root is the right child of `finalized_checkpoint`, divide by two to get // the generalized index of `state.finalized_checkpoint`. let checkpoint_root_gindex = if self.fork_name_unchecked().electra_enabled() { - light_client_update::FINALIZED_ROOT_INDEX_ELECTRA + FINALIZED_ROOT_INDEX_ELECTRA } else { - light_client_update::FINALIZED_ROOT_INDEX + FINALIZED_ROOT_INDEX }; let checkpoint_gindex = checkpoint_root_gindex / 2; @@ -2856,9 +2982,9 @@ impl BeaconState { &self, field_index: usize, leaves: &[Hash256], - ) -> Result, Error> { + ) -> Result, BeaconStateError> { if field_index >= leaves.len() { - return Err(Error::IndexNotSupported(field_index)); + return Err(BeaconStateError::IndexNotSupported(field_index)); } let depth = self.num_fields_pow2().ilog2() as usize; @@ -2917,45 +3043,45 @@ impl BeaconState { } } -impl From for Error { - fn from(e: RelativeEpochError) -> Error { - Error::RelativeEpochError(e) +impl From for BeaconStateError { + fn from(e: RelativeEpochError) -> BeaconStateError { + BeaconStateError::RelativeEpochError(e) } } -impl From for Error { - fn from(e: ssz_types::Error) -> Error { - Error::SszTypesError(e) +impl From for BeaconStateError { + fn from(e: ssz_types::Error) -> BeaconStateError { + 
BeaconStateError::SszTypesError(e) } } -impl From for Error { - fn from(e: bls::Error) -> Error { - Error::BlsError(e) +impl From for BeaconStateError { + fn from(e: bls::Error) -> BeaconStateError { + BeaconStateError::BlsError(e) } } -impl From for Error { - fn from(e: tree_hash::Error) -> Error { - Error::TreeHashError(e) +impl From for BeaconStateError { + fn from(e: tree_hash::Error) -> BeaconStateError { + BeaconStateError::TreeHashError(e) } } -impl From for Error { - fn from(e: merkle_proof::MerkleTreeError) -> Error { - Error::MerkleTreeError(e) +impl From for BeaconStateError { + fn from(e: merkle_proof::MerkleTreeError) -> BeaconStateError { + BeaconStateError::MerkleTreeError(e) } } -impl From for Error { - fn from(e: ArithError) -> Error { - Error::ArithError(e) +impl From for BeaconStateError { + fn from(e: ArithError) -> BeaconStateError { + BeaconStateError::ArithError(e) } } -impl From for Error { - fn from(e: milhouse::Error) -> Self { - Self::MilhouseError(e) +impl From for BeaconStateError { + fn from(e: milhouse::Error) -> BeaconStateError { + BeaconStateError::MilhouseError(e) } } diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/state/committee_cache.rs similarity index 91% rename from consensus/types/src/beacon_state/committee_cache.rs rename to consensus/types/src/state/committee_cache.rs index 06242e8d20e..15f6a4cd376 100644 --- a/consensus/types/src/beacon_state/committee_cache.rs +++ b/consensus/types/src/state/committee_cache.rs @@ -1,17 +1,20 @@ #![allow(clippy::arithmetic_side_effects)] -use crate::*; -use core::num::NonZeroUsize; -use derivative::Derivative; +use std::{num::NonZeroUsize, ops::Range, sync::Arc}; + +use educe::Educe; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz::{Decode, DecodeError, Encode, four_byte_option_impl}; use ssz_derive::{Decode, Encode}; -use std::ops::Range; -use std::sync::Arc; use swap_or_not_shuffle::shuffle_list; -mod tests; +use crate::{ + 
attestation::{AttestationDuty, BeaconCommittee, CommitteeIndex}, + core::{ChainSpec, Domain, Epoch, EthSpec, Slot}, + state::{BeaconState, BeaconStateError}, + validator::Validator, +}; // Define "legacy" implementations of `Option`, `Option` which use four bytes // for encoding the union selector. @@ -20,13 +23,13 @@ four_byte_option_impl!(four_byte_option_non_zero_usize, NonZeroUsize); /// Computes and stores the shuffling for an epoch. Provides various getters to allow callers to /// read the committees for the given epoch. -#[derive(Derivative, Debug, Default, Clone, Serialize, Deserialize, Encode, Decode)] -#[derivative(PartialEq)] +#[derive(Educe, Debug, Default, Clone, Serialize, Deserialize, Encode, Decode)] +#[educe(PartialEq)] pub struct CommitteeCache { #[ssz(with = "four_byte_option_epoch")] initialized_epoch: Option, shuffling: Vec, - #[derivative(PartialEq(compare_with = "compare_shuffling_positions"))] + #[educe(PartialEq(method(compare_shuffling_positions)))] shuffling_positions: Vec, committees_per_slot: u64, slots_per_epoch: u64, @@ -66,7 +69,7 @@ impl CommitteeCache { state: &BeaconState, epoch: Epoch, spec: &ChainSpec, - ) -> Result, Error> { + ) -> Result, BeaconStateError> { // Check that the cache is being built for an in-range epoch. // // We allow caches to be constructed for historic epochs, per: @@ -77,23 +80,23 @@ impl CommitteeCache { .saturating_sub(1u64); if reqd_randao_epoch < state.min_randao_epoch() || epoch > state.current_epoch() + 1 { - return Err(Error::EpochOutOfBounds); + return Err(BeaconStateError::EpochOutOfBounds); } // May cause divide-by-zero errors. if E::slots_per_epoch() == 0 { - return Err(Error::ZeroSlotsPerEpoch); + return Err(BeaconStateError::ZeroSlotsPerEpoch); } // The use of `NonZeroUsize` reduces the maximum number of possible validators by one. 
if state.validators().len() == usize::MAX { - return Err(Error::TooManyValidators); + return Err(BeaconStateError::TooManyValidators); } let active_validator_indices = get_active_validator_indices(state.validators(), epoch); if active_validator_indices.is_empty() { - return Err(Error::InsufficientValidators); + return Err(BeaconStateError::InsufficientValidators); } let committees_per_slot = @@ -107,13 +110,14 @@ impl CommitteeCache { &seed[..], false, ) - .ok_or(Error::UnableToShuffle)?; + .ok_or(BeaconStateError::UnableToShuffle)?; let mut shuffling_positions = vec![<_>::default(); state.validators().len()]; for (i, &v) in shuffling.iter().enumerate() { *shuffling_positions .get_mut(v) - .ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1).into(); + .ok_or(BeaconStateError::ShuffleIndexOutOfBounds(v))? = + NonZeroUsize::new(i + 1).into(); } Ok(Arc::new(CommitteeCache { @@ -188,24 +192,24 @@ impl CommitteeCache { pub fn get_beacon_committees_at_slot( &self, slot: Slot, - ) -> Result>, Error> { + ) -> Result>, BeaconStateError> { if self.initialized_epoch.is_none() { - return Err(Error::CommitteeCacheUninitialized(None)); + return Err(BeaconStateError::CommitteeCacheUninitialized(None)); } (0..self.committees_per_slot()) .map(|index| { self.get_beacon_committee(slot, index) - .ok_or(Error::NoCommittee { slot, index }) + .ok_or(BeaconStateError::NoCommittee { slot, index }) }) .collect() } /// Returns all committees for `self.initialized_epoch`. 
- pub fn get_all_beacon_committees(&self) -> Result>, Error> { + pub fn get_all_beacon_committees(&self) -> Result>, BeaconStateError> { let initialized_epoch = self .initialized_epoch - .ok_or(Error::CommitteeCacheUninitialized(None))?; + .ok_or(BeaconStateError::CommitteeCacheUninitialized(None))?; initialized_epoch.slot_iter(self.slots_per_epoch).try_fold( Vec::with_capacity(self.epoch_committee_count()), diff --git a/consensus/types/src/epoch_cache.rs b/consensus/types/src/state/epoch_cache.rs similarity index 97% rename from consensus/types/src/epoch_cache.rs rename to consensus/types/src/state/epoch_cache.rs index 9956cb400a7..cdea0d143df 100644 --- a/consensus/types/src/epoch_cache.rs +++ b/consensus/types/src/state/epoch_cache.rs @@ -1,7 +1,12 @@ -use crate::{ActivationQueue, BeaconStateError, ChainSpec, Epoch, Hash256, Slot}; -use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; +use safe_arith::{ArithError, SafeArith}; + +use crate::{ + core::{ChainSpec, Epoch, Hash256, Slot}, + state::{ActivationQueue, BeaconStateError}, +}; + /// Cache of values which are uniquely determined at the start of an epoch. /// /// The values are fixed with respect to the last block of the _prior_ epoch, which we refer diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/state/exit_cache.rs similarity index 97% rename from consensus/types/src/beacon_state/exit_cache.rs rename to consensus/types/src/state/exit_cache.rs index 2828a6138c6..43809d1af0e 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/state/exit_cache.rs @@ -1,7 +1,13 @@ -use super::{BeaconStateError, ChainSpec, Epoch, Validator}; -use safe_arith::SafeArith; use std::cmp::Ordering; +use safe_arith::SafeArith; + +use crate::{ + core::{ChainSpec, Epoch}, + state::BeaconStateError, + validator::Validator, +}; + /// Map from exit epoch to the number of validators with that exit epoch. 
#[derive(Debug, Default, Clone, PartialEq)] pub struct ExitCache { diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/state/historical_batch.rs similarity index 81% rename from consensus/types/src/historical_batch.rs rename to consensus/types/src/state/historical_batch.rs index 55377f24894..0167d64f62a 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/state/historical_batch.rs @@ -1,11 +1,16 @@ -use crate::test_utils::TestRandom; -use crate::*; - +use context_deserialize::context_deserialize; +use milhouse::Vector; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256}, + fork::ForkName, + test_utils::TestRandom, +}; + /// Historical block and state roots. /// /// Spec v0.12.1 @@ -26,6 +31,7 @@ pub struct HistoricalBatch { #[cfg(test)] mod tests { use super::*; + use crate::core::MainnetEthSpec; pub type FoundationHistoricalBatch = HistoricalBatch; diff --git a/consensus/types/src/historical_summary.rs b/consensus/types/src/state/historical_summary.rs similarity index 84% rename from consensus/types/src/historical_summary.rs rename to consensus/types/src/state/historical_summary.rs index 0aad2d903d7..f520e464837 100644 --- a/consensus/types/src/historical_summary.rs +++ b/consensus/types/src/state/historical_summary.rs @@ -1,13 +1,18 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{BeaconState, EthSpec, ForkName, Hash256}; -use compare_fields_derive::CompareFields; +use compare_fields::CompareFields; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256}, + fork::ForkName, + state::BeaconState, + test_utils::TestRandom, +}; + /// `HistoricalSummary` matches the 
components of the phase0 `HistoricalBatch` /// making the two hash_tree_root-compatible. This struct is introduced into the beacon state /// in the Capella hard fork. diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/state/iter.rs similarity index 95% rename from consensus/types/src/beacon_state/iter.rs rename to consensus/types/src/state/iter.rs index d99c769e402..63f28d74c4b 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/state/iter.rs @@ -1,4 +1,7 @@ -use crate::*; +use crate::{ + core::{EthSpec, Hash256, Slot}, + state::{BeaconState, BeaconStateError}, +}; /// Returns an iterator across the past block roots of `state` in descending slot-order. /// @@ -28,7 +31,7 @@ impl<'a, E: EthSpec> BlockRootsIter<'a, E> { } impl Iterator for BlockRootsIter<'_, E> { - type Item = Result<(Slot, Hash256), Error>; + type Item = Result<(Slot, Hash256), BeaconStateError>; fn next(&mut self) -> Option { if self.prev > self.genesis_slot @@ -53,6 +56,7 @@ impl Iterator for BlockRootsIter<'_, E> { #[cfg(test)] mod test { use crate::*; + use fixed_bytes::FixedBytesExtended; type E = MinimalEthSpec; diff --git a/consensus/types/src/state/mod.rs b/consensus/types/src/state/mod.rs new file mode 100644 index 00000000000..309796d3592 --- /dev/null +++ b/consensus/types/src/state/mod.rs @@ -0,0 +1,35 @@ +mod activation_queue; +mod balance; +mod beacon_state; +#[macro_use] +mod committee_cache; +mod epoch_cache; +mod exit_cache; +mod historical_batch; +mod historical_summary; +mod iter; +mod progressive_balances_cache; +mod pubkey_cache; +mod slashings_cache; + +pub use activation_queue::ActivationQueue; +pub use balance::Balance; +pub use beacon_state::{ + BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateBellatrix, BeaconStateCapella, + BeaconStateDeneb, BeaconStateElectra, BeaconStateError, BeaconStateFulu, BeaconStateGloas, + BeaconStateHash, BeaconStateRef, CACHED_EPOCHS, +}; +pub use committee_cache::{ + 
CommitteeCache, compute_committee_index_in_epoch, compute_committee_range_in_epoch, + epoch_committee_count, get_active_validator_indices, +}; +pub use epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; +pub use exit_cache::ExitCache; +pub use historical_batch::HistoricalBatch; +pub use historical_summary::HistoricalSummary; +pub use iter::BlockRootsIter; +pub use progressive_balances_cache::{ + EpochTotalBalances, ProgressiveBalancesCache, is_progressive_balances_enabled, +}; +pub use pubkey_cache::PubkeyCache; +pub use slashings_cache::SlashingsCache; diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/state/progressive_balances_cache.rs similarity index 98% rename from consensus/types/src/beacon_state/progressive_balances_cache.rs rename to consensus/types/src/state/progressive_balances_cache.rs index 67d1155dbf1..1e4c311f9a2 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/state/progressive_balances_cache.rs @@ -1,14 +1,16 @@ -use crate::beacon_state::balance::Balance; +#[cfg(feature = "arbitrary")] +use arbitrary::Arbitrary; +use safe_arith::SafeArith; + use crate::{ - BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, - consts::altair::{ + attestation::ParticipationFlags, + core::consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }, + core::{ChainSpec, Epoch, EthSpec}, + state::{Balance, BeaconState, BeaconStateError}, }; -#[cfg(feature = "arbitrary")] -use arbitrary::Arbitrary; -use safe_arith::SafeArith; /// This cache keeps track of the accumulated target attestation balance for the current & previous /// epochs. 
The cached values can be utilised by fork choice to calculate unrealized justification diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/state/pubkey_cache.rs similarity index 98% rename from consensus/types/src/beacon_state/pubkey_cache.rs rename to consensus/types/src/state/pubkey_cache.rs index 85ed00340d7..e62fafb53a6 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/state/pubkey_cache.rs @@ -1,4 +1,4 @@ -use crate::*; +use bls::PublicKeyBytes; use rpds::HashTrieMapSync as HashTrieMap; type ValidatorIndex = usize; diff --git a/consensus/types/src/beacon_state/slashings_cache.rs b/consensus/types/src/state/slashings_cache.rs similarity index 96% rename from consensus/types/src/beacon_state/slashings_cache.rs rename to consensus/types/src/state/slashings_cache.rs index 6530f795e9f..b6ed583df89 100644 --- a/consensus/types/src/beacon_state/slashings_cache.rs +++ b/consensus/types/src/state/slashings_cache.rs @@ -1,8 +1,9 @@ -use crate::{BeaconStateError, Slot, Validator}; #[cfg(feature = "arbitrary")] use arbitrary::Arbitrary; use rpds::HashTrieSetSync as HashTrieSet; +use crate::{core::Slot, state::BeaconStateError, validator::Validator}; + /// Persistent (cheap to clone) cache of all slashed validator indices. 
#[cfg_attr(feature = "arbitrary", derive(Arbitrary))] #[derive(Debug, Default, Clone, PartialEq)] diff --git a/consensus/types/src/contribution_and_proof.rs b/consensus/types/src/sync_committee/contribution_and_proof.rs similarity index 88% rename from consensus/types/src/contribution_and_proof.rs rename to consensus/types/src/sync_committee/contribution_and_proof.rs index 4d70cd1f8a0..2a344b89dee 100644 --- a/consensus/types/src/contribution_and_proof.rs +++ b/consensus/types/src/sync_committee/contribution_and_proof.rs @@ -1,14 +1,17 @@ -use super::{ - ChainSpec, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, - SyncCommitteeContribution, SyncSelectionProof, -}; -use crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + sync_committee::{SyncCommitteeContribution, SyncSelectionProof}, + test_utils::TestRandom, +}; + /// A Validators aggregate sync committee contribution and selection proof. 
#[cfg_attr( feature = "arbitrary", diff --git a/consensus/types/src/sync_committee/mod.rs b/consensus/types/src/sync_committee/mod.rs new file mode 100644 index 00000000000..5a75975fe0a --- /dev/null +++ b/consensus/types/src/sync_committee/mod.rs @@ -0,0 +1,25 @@ +mod contribution_and_proof; +mod signed_contribution_and_proof; +mod sync_aggregate; +mod sync_aggregator_selection_data; +mod sync_committee; +mod sync_committee_contribution; +mod sync_committee_message; +mod sync_committee_subscription; +mod sync_duty; +mod sync_selection_proof; +mod sync_subnet_id; + +pub use contribution_and_proof::ContributionAndProof; +pub use signed_contribution_and_proof::SignedContributionAndProof; +pub use sync_aggregate::{Error as SyncAggregateError, SyncAggregate}; +pub use sync_aggregator_selection_data::SyncAggregatorSelectionData; +pub use sync_committee::{Error as SyncCommitteeError, SyncCommittee}; +pub use sync_committee_contribution::{ + Error as SyncCommitteeContributionError, SyncCommitteeContribution, SyncContributionData, +}; +pub use sync_committee_message::SyncCommitteeMessage; +pub use sync_committee_subscription::SyncCommitteeSubscription; +pub use sync_duty::SyncDuty; +pub use sync_selection_proof::SyncSelectionProof; +pub use sync_subnet_id::{SyncSubnetId, sync_subnet_id_to_string}; diff --git a/consensus/types/src/signed_contribution_and_proof.rs b/consensus/types/src/sync_committee/signed_contribution_and_proof.rs similarity index 87% rename from consensus/types/src/signed_contribution_and_proof.rs rename to consensus/types/src/sync_committee/signed_contribution_and_proof.rs index 51c453d32ff..0027003b9f3 100644 --- a/consensus/types/src/signed_contribution_and_proof.rs +++ b/consensus/types/src/sync_committee/signed_contribution_and_proof.rs @@ -1,14 +1,17 @@ -use super::{ - ChainSpec, ContributionAndProof, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, - Signature, SignedRoot, SyncCommitteeContribution, SyncSelectionProof, -}; -use 
crate::context_deserialize; -use crate::test_utils::TestRandom; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot}, + fork::{Fork, ForkName}, + sync_committee::{ContributionAndProof, SyncCommitteeContribution, SyncSelectionProof}, + test_utils::TestRandom, +}; + /// A Validators signed contribution proof to publish on the `sync_committee_contribution_and_proof` /// gossipsub topic. #[cfg_attr( diff --git a/consensus/types/src/sync_aggregate.rs b/consensus/types/src/sync_committee/sync_aggregate.rs similarity index 86% rename from consensus/types/src/sync_aggregate.rs rename to consensus/types/src/sync_committee/sync_aggregate.rs index 7a4ef8f026a..e5848aa22ce 100644 --- a/consensus/types/src/sync_aggregate.rs +++ b/consensus/types/src/sync_committee/sync_aggregate.rs @@ -1,14 +1,20 @@ -use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{AggregateSignature, BitVector, EthSpec, ForkName, SyncCommitteeContribution}; -use derivative::Derivative; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; +use educe::Educe; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}, + fork::ForkName, + sync_committee::SyncCommitteeContribution, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), @@ -26,10 +32,8 @@ impl From for Error { derive(arbitrary::Arbitrary), arbitrary(bound = "E: EthSpec") )] -#[derive( - Debug, Clone, Serialize, Deserialize, 
Encode, Decode, TreeHash, TestRandom, Derivative, -)] -#[derivative(PartialEq, Hash(bound = "E: EthSpec"))] +#[derive(Debug, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Educe)] +#[educe(PartialEq, Hash(bound(E: EthSpec)))] #[serde(bound = "E: EthSpec")] #[context_deserialize(ForkName)] pub struct SyncAggregate { diff --git a/consensus/types/src/sync_aggregator_selection_data.rs b/consensus/types/src/sync_committee/sync_aggregator_selection_data.rs similarity index 82% rename from consensus/types/src/sync_aggregator_selection_data.rs rename to consensus/types/src/sync_committee/sync_aggregator_selection_data.rs index a280369fea3..e905ca036b3 100644 --- a/consensus/types/src/sync_aggregator_selection_data.rs +++ b/consensus/types/src/sync_committee/sync_aggregator_selection_data.rs @@ -1,11 +1,15 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{ForkName, SignedRoot, Slot}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{SignedRoot, Slot}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Clone, Serialize, Deserialize, Hash, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee/sync_committee.rs similarity index 95% rename from consensus/types/src/sync_committee.rs rename to consensus/types/src/sync_committee/sync_committee.rs index a9fde425540..54484118002 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee/sync_committee.rs @@ -1,14 +1,16 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{EthSpec, FixedVector, ForkName, SyncSubnetId}; +use std::collections::HashMap; + use bls::PublicKeyBytes; +use 
context_deserialize::context_deserialize; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; +use ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::EthSpec, fork::ForkName, sync_committee::SyncSubnetId, test_utils::TestRandom}; + #[derive(Debug, PartialEq)] pub enum Error { ArithError(ArithError), diff --git a/consensus/types/src/sync_committee_contribution.rs b/consensus/types/src/sync_committee/sync_committee_contribution.rs similarity index 93% rename from consensus/types/src/sync_committee_contribution.rs rename to consensus/types/src/sync_committee/sync_committee_contribution.rs index db22a3bdbc8..09376fbe5c0 100644 --- a/consensus/types/src/sync_committee_contribution.rs +++ b/consensus/types/src/sync_committee/sync_committee_contribution.rs @@ -1,12 +1,18 @@ -use super::{AggregateSignature, EthSpec, ForkName, SignedRoot}; -use crate::context_deserialize; -use crate::slot_data::SlotData; -use crate::{BitVector, Hash256, Slot, SyncCommitteeMessage, test_utils::TestRandom}; +use bls::AggregateSignature; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::BitVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::ForkName, + sync_committee::SyncCommitteeMessage, + test_utils::TestRandom, +}; + #[derive(Debug, PartialEq)] pub enum Error { SszTypesError(ssz_types::Error), diff --git a/consensus/types/src/sync_committee_message.rs b/consensus/types/src/sync_committee/sync_committee_message.rs similarity index 88% rename from consensus/types/src/sync_committee_message.rs rename to consensus/types/src/sync_committee/sync_committee_message.rs index d5bb7250bb4..ed42555c43f 100644 --- a/consensus/types/src/sync_committee_message.rs +++ 
b/consensus/types/src/sync_committee/sync_committee_message.rs @@ -1,14 +1,16 @@ -use crate::context_deserialize; -use crate::slot_data::SlotData; -use crate::test_utils::TestRandom; -use crate::{ - ChainSpec, Domain, EthSpec, Fork, ForkName, Hash256, SecretKey, Signature, SignedRoot, Slot, -}; +use bls::{SecretKey, Signature}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, SlotData}, + fork::{Fork, ForkName}, + test_utils::TestRandom, +}; + /// The data upon which a `SyncCommitteeContribution` is based. #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom)] diff --git a/consensus/types/src/sync_committee_subscription.rs b/consensus/types/src/sync_committee/sync_committee_subscription.rs similarity index 96% rename from consensus/types/src/sync_committee_subscription.rs rename to consensus/types/src/sync_committee/sync_committee_subscription.rs index 8e040279d73..6365b015dd2 100644 --- a/consensus/types/src/sync_committee_subscription.rs +++ b/consensus/types/src/sync_committee/sync_committee_subscription.rs @@ -1,7 +1,8 @@ -use crate::Epoch; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use crate::core::Epoch; + /// A sync committee subscription created when a validator subscribes to sync committee subnets to perform /// sync committee duties. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] diff --git a/consensus/types/src/sync_duty.rs b/consensus/types/src/sync_committee/sync_duty.rs similarity index 96% rename from consensus/types/src/sync_duty.rs rename to consensus/types/src/sync_committee/sync_duty.rs index 59fbc960db5..773cc008f9f 100644 --- a/consensus/types/src/sync_duty.rs +++ b/consensus/types/src/sync_committee/sync_duty.rs @@ -1,8 +1,13 @@ -use crate::{EthSpec, SyncCommittee, SyncSubnetId}; +use std::collections::HashSet; + use bls::PublicKeyBytes; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; -use std::collections::HashSet; + +use crate::{ + core::EthSpec, + sync_committee::{SyncCommittee, SyncSubnetId}, +}; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct SyncDuty { diff --git a/consensus/types/src/sync_selection_proof.rs b/consensus/types/src/sync_committee/sync_selection_proof.rs similarity index 90% rename from consensus/types/src/sync_selection_proof.rs rename to consensus/types/src/sync_committee/sync_selection_proof.rs index b1e9e8186f5..723f0c06c96 100644 --- a/consensus/types/src/sync_selection_proof.rs +++ b/consensus/types/src/sync_committee/sync_selection_proof.rs @@ -1,16 +1,20 @@ -use crate::consts::altair::{ - SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE, -}; -use crate::{ - ChainSpec, Domain, EthSpec, Fork, Hash256, PublicKey, SecretKey, Signature, SignedRoot, Slot, - SyncAggregatorSelectionData, -}; +use std::cmp; + +use bls::{PublicKey, SecretKey, Signature}; use ethereum_hashing::hash; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; use ssz::Encode; -use ssz_types::typenum::Unsigned; -use std::cmp; +use typenum::Unsigned; + +use crate::{ + core::{ + ChainSpec, Domain, EthSpec, Hash256, SignedRoot, Slot, + consts::altair::{SYNC_COMMITTEE_SUBNET_COUNT, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE}, + }, + fork::Fork, + 
sync_committee::SyncAggregatorSelectionData, +}; #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] @@ -108,8 +112,9 @@ impl From for SyncSelectionProof { #[cfg(test)] mod test { use super::*; - use crate::{FixedBytesExtended, MainnetEthSpec}; + use crate::MainnetEthSpec; use eth2_interop_keypairs::keypair; + use fixed_bytes::FixedBytesExtended; #[test] fn proof_sign_and_verify() { diff --git a/consensus/types/src/sync_subnet_id.rs b/consensus/types/src/sync_committee/sync_subnet_id.rs similarity index 90% rename from consensus/types/src/sync_subnet_id.rs rename to consensus/types/src/sync_committee/sync_subnet_id.rs index 3d0d853fcaa..6cb11f6b038 100644 --- a/consensus/types/src/sync_subnet_id.rs +++ b/consensus/types/src/sync_committee/sync_subnet_id.rs @@ -1,13 +1,16 @@ //! Identifies each sync committee subnet by an integer identifier. -use crate::EthSpec; -use crate::consts::altair::SYNC_COMMITTEE_SUBNET_COUNT; +use std::{ + collections::HashSet, + fmt::{self, Display}, + ops::{Deref, DerefMut}, + sync::LazyLock, +}; + use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Serialize}; -use ssz_types::typenum::Unsigned; -use std::collections::HashSet; -use std::fmt::{self, Display}; -use std::ops::{Deref, DerefMut}; -use std::sync::LazyLock; +use typenum::Unsigned; + +use crate::core::{EthSpec, consts::altair::SYNC_COMMITTEE_SUBNET_COUNT}; static SYNC_SUBNET_ID_TO_STRING: LazyLock> = LazyLock::new(|| { let mut v = Vec::with_capacity(SYNC_COMMITTEE_SUBNET_COUNT as usize); diff --git a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs index f30afda257e..5ccd748c25c 100644 --- a/consensus/types/src/test_utils/generate_deterministic_keypairs.rs +++ b/consensus/types/src/test_utils/generate_deterministic_keypairs.rs @@ -1,7 +1,8 @@ -use crate::*; +use std::path::PathBuf; + +use bls::Keypair; use 
eth2_interop_keypairs::{keypair, keypairs_from_yaml_file}; use rayon::prelude::*; -use std::path::PathBuf; use tracing::debug; /// Generates `validator_count` keypairs where the secret key is derived solely from the index of diff --git a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs index 0f52e485a8a..cf7b5df891a 100644 --- a/consensus/types/src/test_utils/generate_random_block_and_blobs.rs +++ b/consensus/types/src/test_utils/generate_random_block_and_blobs.rs @@ -1,11 +1,16 @@ -use rand::Rng; - +use bls::Signature; use kzg::{KzgCommitment, KzgProof}; +use rand::Rng; -use crate::beacon_block_body::KzgCommitments; -use crate::*; - -use super::*; +use crate::{ + block::{BeaconBlock, SignedBeaconBlock}, + core::{EthSpec, MainnetEthSpec}, + data::{Blob, BlobSidecar, BlobsList}, + execution::FullPayload, + fork::{ForkName, map_fork_name}, + kzg_ext::{KzgCommitments, KzgProofs}, + test_utils::TestRandom, +}; type BlobsBundle = (KzgCommitments, KzgProofs, BlobsList); @@ -73,11 +78,12 @@ pub fn generate_blobs(n_blobs: usize) -> Result, Stri mod test { use super::*; use rand::rng; + use ssz_types::FixedVector; #[test] fn test_verify_blob_inclusion_proof() { let (_block, blobs) = - generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut rng()); + generate_rand_block_and_blobs::(ForkName::Deneb, 2, &mut rng()); for blob in blobs { assert!(blob.verify_blob_sidecar_inclusion_proof()); } @@ -115,7 +121,7 @@ mod test { #[test] fn test_verify_blob_inclusion_proof_invalid() { let (_block, blobs) = - generate_rand_block_and_blobs::(ForkName::Deneb, 6, &mut rng()); + generate_rand_block_and_blobs::(ForkName::Deneb, 1, &mut rng()); for mut blob in blobs { blob.kzg_commitment_inclusion_proof = FixedVector::random_for_test(&mut rng()); diff --git a/consensus/types/src/test_utils/mod.rs b/consensus/types/src/test_utils/mod.rs index 37d58d43420..c4409b43924 100644 --- 
a/consensus/types/src/test_utils/mod.rs +++ b/consensus/types/src/test_utils/mod.rs @@ -1,17 +1,5 @@ #![allow(clippy::arithmetic_side_effects)] -use std::fmt::Debug; - -pub use rand::{RngCore, SeedableRng}; -pub use rand_xorshift::XorShiftRng; - -pub use generate_deterministic_keypairs::generate_deterministic_keypair; -pub use generate_deterministic_keypairs::generate_deterministic_keypairs; -pub use generate_deterministic_keypairs::load_keypairs_from_yaml; -use ssz::{Decode, Encode, ssz_encode}; -pub use test_random::{TestRandom, test_random_instance}; -use tree_hash::TreeHash; - #[macro_use] mod macros; mod generate_deterministic_keypairs; @@ -19,6 +7,18 @@ mod generate_deterministic_keypairs; mod generate_random_block_and_blobs; mod test_random; +pub use generate_deterministic_keypairs::generate_deterministic_keypair; +pub use generate_deterministic_keypairs::generate_deterministic_keypairs; +pub use generate_deterministic_keypairs::load_keypairs_from_yaml; +pub use test_random::{TestRandom, test_random_instance}; + +pub use rand::{RngCore, SeedableRng}; +pub use rand_xorshift::XorShiftRng; + +use ssz::{Decode, Encode, ssz_encode}; +use std::fmt::Debug; +use tree_hash::TreeHash; + pub fn test_ssz_tree_hash_pair(v1: &T, v2: &U) where T: TreeHash + Encode + Decode + Debug + PartialEq, diff --git a/consensus/types/src/test_utils/test_random/address.rs b/consensus/types/src/test_utils/test_random/address.rs index 421801ce53c..2f601cb91ec 100644 --- a/consensus/types/src/test_utils/test_random/address.rs +++ b/consensus/types/src/test_utils/test_random/address.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Address, test_utils::TestRandom}; impl TestRandom for Address { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = vec![0; 20]; rng.fill_bytes(&mut key_bytes); Address::from_slice(&key_bytes[..]) diff --git a/consensus/types/src/test_utils/test_random/aggregate_signature.rs 
b/consensus/types/src/test_utils/test_random/aggregate_signature.rs index 772f2844313..f9f3dd95677 100644 --- a/consensus/types/src/test_utils/test_random/aggregate_signature.rs +++ b/consensus/types/src/test_utils/test_random/aggregate_signature.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::{AggregateSignature, Signature}; + +use crate::test_utils::TestRandom; impl TestRandom for AggregateSignature { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let signature = Signature::random_for_test(rng); let mut aggregate_signature = AggregateSignature::infinity(); aggregate_signature.add_assign(&signature); diff --git a/consensus/types/src/test_utils/test_random/bitfield.rs b/consensus/types/src/test_utils/test_random/bitfield.rs index e335ac7fe8b..762f41eb34a 100644 --- a/consensus/types/src/test_utils/test_random/bitfield.rs +++ b/consensus/types/src/test_utils/test_random/bitfield.rs @@ -1,8 +1,11 @@ -use super::*; use smallvec::smallvec; +use ssz_types::{BitList, BitVector}; +use typenum::Unsigned; + +use crate::test_utils::TestRandom; impl TestRandom for BitList { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let initial_len = std::cmp::max(1, N::to_usize().div_ceil(8)); let mut raw_bytes = smallvec![0; initial_len]; rng.fill_bytes(&mut raw_bytes); @@ -23,7 +26,7 @@ impl TestRandom for BitList { } impl TestRandom for BitVector { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut raw_bytes = smallvec![0; std::cmp::max(1, N::to_usize().div_ceil(8))]; rng.fill_bytes(&mut raw_bytes); // If N isn't divisible by 8 diff --git a/consensus/types/src/test_utils/test_random/hash256.rs b/consensus/types/src/test_utils/test_random/hash256.rs index 21d443c0e2a..4d7570fb55c 100644 --- a/consensus/types/src/test_utils/test_random/hash256.rs +++ 
b/consensus/types/src/test_utils/test_random/hash256.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Hash256, test_utils::TestRandom}; impl TestRandom for Hash256 { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = vec![0; 32]; rng.fill_bytes(&mut key_bytes); Hash256::from_slice(&key_bytes[..]) diff --git a/consensus/types/src/test_utils/test_random/kzg_commitment.rs b/consensus/types/src/test_utils/test_random/kzg_commitment.rs index a4030f2b6a3..31e316a1987 100644 --- a/consensus/types/src/test_utils/test_random/kzg_commitment.rs +++ b/consensus/types/src/test_utils/test_random/kzg_commitment.rs @@ -1,4 +1,6 @@ -use super::*; +use kzg::KzgCommitment; + +use crate::test_utils::TestRandom; impl TestRandom for KzgCommitment { fn random_for_test(rng: &mut impl rand::RngCore) -> Self { diff --git a/consensus/types/src/test_utils/test_random/kzg_proof.rs b/consensus/types/src/test_utils/test_random/kzg_proof.rs index 7e771ca5660..4465d5ab39d 100644 --- a/consensus/types/src/test_utils/test_random/kzg_proof.rs +++ b/consensus/types/src/test_utils/test_random/kzg_proof.rs @@ -1,8 +1,9 @@ -use super::*; -use kzg::BYTES_PER_COMMITMENT; +use kzg::{BYTES_PER_COMMITMENT, KzgProof}; + +use crate::test_utils::TestRandom; impl TestRandom for KzgProof { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut bytes = [0; BYTES_PER_COMMITMENT]; rng.fill_bytes(&mut bytes); Self(bytes) diff --git a/consensus/types/src/test_utils/test_random/mod.rs b/consensus/types/src/test_utils/test_random/mod.rs new file mode 100644 index 00000000000..41812593fa7 --- /dev/null +++ b/consensus/types/src/test_utils/test_random/mod.rs @@ -0,0 +1,15 @@ +mod address; +mod aggregate_signature; +mod bitfield; +mod hash256; +mod kzg_commitment; +mod kzg_proof; +mod public_key; +mod public_key_bytes; +mod secret_key; +mod signature; +mod 
signature_bytes; +mod test_random; +mod uint256; + +pub use test_random::{TestRandom, test_random_instance}; diff --git a/consensus/types/src/test_utils/test_random/public_key.rs b/consensus/types/src/test_utils/test_random/public_key.rs index d33e9ac7043..9d287c23d73 100644 --- a/consensus/types/src/test_utils/test_random/public_key.rs +++ b/consensus/types/src/test_utils/test_random/public_key.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::{PublicKey, SecretKey}; + +use crate::test_utils::TestRandom; impl TestRandom for PublicKey { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { SecretKey::random_for_test(rng).public_key() } } diff --git a/consensus/types/src/test_utils/test_random/public_key_bytes.rs b/consensus/types/src/test_utils/test_random/public_key_bytes.rs index 6e5cafc4f03..587c3baf8fb 100644 --- a/consensus/types/src/test_utils/test_random/public_key_bytes.rs +++ b/consensus/types/src/test_utils/test_random/public_key_bytes.rs @@ -1,9 +1,9 @@ -use bls::PUBLIC_KEY_BYTES_LEN; +use bls::{PUBLIC_KEY_BYTES_LEN, PublicKey, PublicKeyBytes}; -use super::*; +use crate::test_utils::TestRandom; impl TestRandom for PublicKeyBytes { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { //50-50 chance for signature to be "valid" or invalid if bool::random_for_test(rng) { //valid signature diff --git a/consensus/types/src/test_utils/test_random/secret_key.rs b/consensus/types/src/test_utils/test_random/secret_key.rs index da1614aa24e..a8295d968af 100644 --- a/consensus/types/src/test_utils/test_random/secret_key.rs +++ b/consensus/types/src/test_utils/test_random/secret_key.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::SecretKey; + +use crate::test_utils::TestRandom; impl TestRandom for SecretKey { - fn random_for_test(_rng: &mut impl RngCore) -> Self { + fn random_for_test(_rng: &mut impl rand::RngCore) -> Self { // TODO: Not deterministic 
generation. Using `SecretKey::deserialize` results in // `BlstError(BLST_BAD_ENCODING)`, need to debug with blst source on what encoding expects. SecretKey::random() diff --git a/consensus/types/src/test_utils/test_random/signature.rs b/consensus/types/src/test_utils/test_random/signature.rs index 8bc0d711103..006aba9650a 100644 --- a/consensus/types/src/test_utils/test_random/signature.rs +++ b/consensus/types/src/test_utils/test_random/signature.rs @@ -1,7 +1,9 @@ -use super::*; +use bls::Signature; + +use crate::test_utils::TestRandom; impl TestRandom for Signature { - fn random_for_test(_rng: &mut impl RngCore) -> Self { + fn random_for_test(_rng: &mut impl rand::RngCore) -> Self { // TODO: `SecretKey::random_for_test` does not return a deterministic signature. Since this // signature will not pass verification we could just return the generator point or the // generator point multiplied by a random scalar if we want disctint signatures. diff --git a/consensus/types/src/test_utils/test_random/signature_bytes.rs b/consensus/types/src/test_utils/test_random/signature_bytes.rs index 2117a482321..6992e574679 100644 --- a/consensus/types/src/test_utils/test_random/signature_bytes.rs +++ b/consensus/types/src/test_utils/test_random/signature_bytes.rs @@ -1,9 +1,9 @@ -use bls::SIGNATURE_BYTES_LEN; +use bls::{SIGNATURE_BYTES_LEN, Signature, SignatureBytes}; -use super::*; +use crate::test_utils::TestRandom; impl TestRandom for SignatureBytes { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { //50-50 chance for signature to be "valid" or invalid if bool::random_for_test(rng) { //valid signature diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random/test_random.rs similarity index 90% rename from consensus/types/src/test_utils/test_random.rs rename to consensus/types/src/test_utils/test_random/test_random.rs index 98bb8565dd6..101fbec51b0 100644 --- 
a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random/test_random.rs @@ -1,23 +1,10 @@ -use crate::*; -use rand::RngCore; -use rand::SeedableRng; +use std::{marker::PhantomData, sync::Arc}; + +use rand::{RngCore, SeedableRng}; use rand_xorshift::XorShiftRng; use smallvec::{SmallVec, smallvec}; -use std::marker::PhantomData; -use std::sync::Arc; - -mod address; -mod aggregate_signature; -mod bitfield; -mod hash256; -mod kzg_commitment; -mod kzg_proof; -mod public_key; -mod public_key_bytes; -mod secret_key; -mod signature; -mod signature_bytes; -mod uint256; +use ssz_types::VariableList; +use typenum::Unsigned; pub fn test_random_instance() -> T { let mut rng = XorShiftRng::from_seed([0x42; 16]); @@ -115,7 +102,7 @@ where } } - output.into() + output.try_into().unwrap() } } diff --git a/consensus/types/src/test_utils/test_random/uint256.rs b/consensus/types/src/test_utils/test_random/uint256.rs index 30077f0e0f6..eccf4765955 100644 --- a/consensus/types/src/test_utils/test_random/uint256.rs +++ b/consensus/types/src/test_utils/test_random/uint256.rs @@ -1,7 +1,7 @@ -use super::*; +use crate::{core::Uint256, test_utils::TestRandom}; impl TestRandom for Uint256 { - fn random_for_test(rng: &mut impl RngCore) -> Self { + fn random_for_test(rng: &mut impl rand::RngCore) -> Self { let mut key_bytes = [0; 32]; rng.fill_bytes(&mut key_bytes); Self::from_le_slice(&key_bytes[..]) diff --git a/consensus/types/src/validator/mod.rs b/consensus/types/src/validator/mod.rs new file mode 100644 index 00000000000..8a67407821c --- /dev/null +++ b/consensus/types/src/validator/mod.rs @@ -0,0 +1,9 @@ +mod proposer_preparation_data; +mod validator; +mod validator_registration_data; +mod validator_subscription; + +pub use proposer_preparation_data::ProposerPreparationData; +pub use validator::{Validator, is_compounding_withdrawal_credential}; +pub use validator_registration_data::{SignedValidatorRegistrationData, ValidatorRegistrationData}; +pub 
use validator_subscription::ValidatorSubscription; diff --git a/consensus/types/src/proposer_preparation_data.rs b/consensus/types/src/validator/proposer_preparation_data.rs similarity index 95% rename from consensus/types/src/proposer_preparation_data.rs rename to consensus/types/src/validator/proposer_preparation_data.rs index 477fb3b9d15..8ef675de4fd 100644 --- a/consensus/types/src/proposer_preparation_data.rs +++ b/consensus/types/src/validator/proposer_preparation_data.rs @@ -1,6 +1,7 @@ -use crate::*; use serde::{Deserialize, Serialize}; +use crate::core::Address; + /// A proposer preparation, created when a validator prepares the beacon node for potential proposers /// by supplying information required when proposing blocks for the given validators. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone)] diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator/validator.rs similarity index 97% rename from consensus/types/src/validator.rs rename to consensus/types/src/validator/validator.rs index dec8bba627f..7898ab9073a 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator/validator.rs @@ -1,13 +1,19 @@ -use crate::context_deserialize; -use crate::{ - Address, BeaconState, ChainSpec, Checkpoint, Epoch, EthSpec, FixedBytesExtended, ForkName, - Hash256, PublicKeyBytes, test_utils::TestRandom, -}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + attestation::Checkpoint, + core::{Address, ChainSpec, Epoch, EthSpec, Hash256}, + fork::ForkName, + state::BeaconState, + test_utils::TestRandom, +}; + /// Information about a `BeaconChain` validator. 
/// /// Spec v0.12.1 diff --git a/consensus/types/src/validator_registration_data.rs b/consensus/types/src/validator/validator_registration_data.rs similarity index 93% rename from consensus/types/src/validator_registration_data.rs rename to consensus/types/src/validator/validator_registration_data.rs index 345771074c5..a0a1df7dc54 100644 --- a/consensus/types/src/validator_registration_data.rs +++ b/consensus/types/src/validator/validator_registration_data.rs @@ -1,8 +1,10 @@ -use crate::*; +use bls::{PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use tree_hash_derive::TreeHash; +use crate::core::{Address, ChainSpec, SignedRoot}; + /// Validator registration, for use in interacting with servers implementing the builder API. #[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode)] pub struct SignedValidatorRegistrationData { diff --git a/consensus/types/src/validator_subscription.rs b/consensus/types/src/validator/validator_subscription.rs similarity index 93% rename from consensus/types/src/validator_subscription.rs rename to consensus/types/src/validator/validator_subscription.rs index 62932638ec1..92fb200e10d 100644 --- a/consensus/types/src/validator_subscription.rs +++ b/consensus/types/src/validator/validator_subscription.rs @@ -1,7 +1,8 @@ -use crate::*; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use crate::{attestation::CommitteeIndex, core::Slot}; + /// A validator subscription, created when a validator subscribes to a slot to perform optional aggregation /// duties. 
#[derive(PartialEq, Debug, Serialize, Deserialize, Clone, Encode, Decode, Eq, PartialOrd, Ord)] diff --git a/consensus/types/src/withdrawal/mod.rs b/consensus/types/src/withdrawal/mod.rs new file mode 100644 index 00000000000..bac80d00bed --- /dev/null +++ b/consensus/types/src/withdrawal/mod.rs @@ -0,0 +1,9 @@ +mod pending_partial_withdrawal; +mod withdrawal; +mod withdrawal_credentials; +mod withdrawal_request; + +pub use pending_partial_withdrawal::PendingPartialWithdrawal; +pub use withdrawal::{Withdrawal, Withdrawals}; +pub use withdrawal_credentials::WithdrawalCredentials; +pub use withdrawal_request::WithdrawalRequest; diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/withdrawal/pending_partial_withdrawal.rs similarity index 85% rename from consensus/types/src/pending_partial_withdrawal.rs rename to consensus/types/src/withdrawal/pending_partial_withdrawal.rs index e9b10f79b5f..cd866369a47 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/withdrawal/pending_partial_withdrawal.rs @@ -1,11 +1,11 @@ -use crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{Epoch, ForkName}; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Epoch, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/withdrawal.rs b/consensus/types/src/withdrawal/withdrawal.rs similarity index 73% rename from consensus/types/src/withdrawal.rs rename to consensus/types/src/withdrawal/withdrawal.rs index ef4a1f285d3..d75bd4f501f 100644 --- a/consensus/types/src/withdrawal.rs +++ b/consensus/types/src/withdrawal/withdrawal.rs @@ -1,10 +1,16 @@ -use 
crate::test_utils::TestRandom; -use crate::*; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{ + core::{Address, EthSpec}, + fork::ForkName, + test_utils::TestRandom, +}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, @@ -21,6 +27,8 @@ pub struct Withdrawal { pub amount: u64, } +pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; + #[cfg(test)] mod tests { use super::*; diff --git a/consensus/types/src/withdrawal_credentials.rs b/consensus/types/src/withdrawal/withdrawal_credentials.rs similarity index 91% rename from consensus/types/src/withdrawal_credentials.rs rename to consensus/types/src/withdrawal/withdrawal_credentials.rs index 52d51ed559c..b732222ca1b 100644 --- a/consensus/types/src/withdrawal_credentials.rs +++ b/consensus/types/src/withdrawal/withdrawal_credentials.rs @@ -1,5 +1,6 @@ -use crate::*; -use bls::get_withdrawal_credentials; +use bls::{PublicKey, get_withdrawal_credentials}; + +use crate::core::{Address, ChainSpec, Hash256}; pub struct WithdrawalCredentials(Hash256); @@ -27,7 +28,7 @@ impl From for Hash256 { #[cfg(test)] mod test { use super::*; - use crate::test_utils::generate_deterministic_keypair; + use crate::{EthSpec, MainnetEthSpec, test_utils::generate_deterministic_keypair}; use std::str::FromStr; #[test] diff --git a/consensus/types/src/withdrawal_request.rs b/consensus/types/src/withdrawal/withdrawal_request.rs similarity index 87% rename from consensus/types/src/withdrawal_request.rs rename to consensus/types/src/withdrawal/withdrawal_request.rs index c08921a68c4..98a40016f9f 100644 --- a/consensus/types/src/withdrawal_request.rs +++ b/consensus/types/src/withdrawal/withdrawal_request.rs @@ -1,12 +1,13 @@ -use 
crate::context_deserialize; -use crate::test_utils::TestRandom; -use crate::{Address, ForkName, PublicKeyBytes}; +use bls::PublicKeyBytes; +use context_deserialize::context_deserialize; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +use crate::{core::Address, fork::ForkName, test_utils::TestRandom}; + #[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] #[derive( Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/tests/committee_cache.rs similarity index 97% rename from consensus/types/src/beacon_state/committee_cache/tests.rs rename to consensus/types/tests/committee_cache.rs index 1d2ca4ccdb7..751ef05d299 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/tests/committee_cache.rs @@ -1,9 +1,14 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use beacon_chain::types::*; use std::sync::LazyLock; + +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; +use milhouse::Vector; use swap_or_not_shuffle::shuffle_list; +use types::*; + +use crate::test_utils::generate_deterministic_keypairs; pub const VALIDATOR_COUNT: usize = 16; diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/tests/state.rs similarity index 97% rename from consensus/types/src/beacon_state/tests.rs rename to consensus/types/tests/state.rs index e5b05a4a5bd..63ab3b8084b 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/tests/state.rs @@ -1,15 +1,17 @@ #![cfg(test)] -use crate::test_utils::*; -use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; -use beacon_chain::types::{ - BeaconState, 
BeaconStateAltair, BeaconStateBase, BeaconStateError, ChainSpec, Domain, Epoch, - EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, RelativeEpoch, - Slot, Vector, test_utils::TestRandom, -}; -use ssz::Encode; use std::ops::Mul; use std::sync::LazyLock; + +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; +use milhouse::Vector; +use rand::SeedableRng; +use rand_xorshift::XorShiftRng; +use ssz::Encode; use swap_or_not_shuffle::compute_shuffled_index; +use types::test_utils::{TestRandom, generate_deterministic_keypairs}; +use types::*; pub const MAX_VALIDATOR_COUNT: usize = 129; pub const SLOT_OFFSET: Slot = Slot::new(1); diff --git a/crypto/bls/src/generic_aggregate_signature.rs b/crypto/bls/src/generic_aggregate_signature.rs index 98a634ee11f..35674394a0d 100644 --- a/crypto/bls/src/generic_aggregate_signature.rs +++ b/crypto/bls/src/generic_aggregate_signature.rs @@ -124,13 +124,15 @@ where /// Aggregates a signature onto `self`. pub fn add_assign(&mut self, other: &GenericSignature) { if let Some(other_point) = other.point() { - self.is_infinity = self.is_infinity && other.is_infinity; if let Some(self_point) = &mut self.point { - self_point.add_assign(other_point) + self_point.add_assign(other_point); + self.is_infinity = self.is_infinity && other.is_infinity; } else { let mut self_point = AggSig::infinity(); self_point.add_assign(other_point); - self.point = Some(self_point) + self.point = Some(self_point); + // the result is infinity, if `other` is + self.is_infinity = other.is_infinity; } } } @@ -138,13 +140,15 @@ where /// Aggregates an aggregate signature onto `self`. 
pub fn add_assign_aggregate(&mut self, other: &Self) { if let Some(other_point) = other.point() { - self.is_infinity = self.is_infinity && other.is_infinity; if let Some(self_point) = &mut self.point { - self_point.add_assign_aggregate(other_point) + self_point.add_assign_aggregate(other_point); + self.is_infinity = self.is_infinity && other.is_infinity; } else { let mut self_point = AggSig::infinity(); self_point.add_assign_aggregate(other_point); - self.point = Some(self_point) + self.point = Some(self_point); + // the result is infinity, if `other` is + self.is_infinity = other.is_infinity; } } } diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index e7eee050775..e4ad72f3afa 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -164,11 +164,7 @@ impl TAggregateSignature for Aggregate } fn serialize(&self) -> [u8; SIGNATURE_BYTES_LEN] { - let mut bytes = [0; SIGNATURE_BYTES_LEN]; - - bytes[..].copy_from_slice(&self.0); - - bytes + self.0 } fn deserialize(bytes: &[u8]) -> Result { diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index 00f82bfcecd..1827ea99210 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -356,6 +356,17 @@ macro_rules! 
test_suite { .assert_single_message_verify(true) } + #[test] + fn empty_aggregate_plus_infinity_should_be_infinity() { + let mut agg = AggregateSignature::empty(); + let infinity_sig = Signature::deserialize(&INFINITY_SIGNATURE).unwrap(); + agg.add_assign(&infinity_sig); + assert!( + agg.is_infinity(), + "is_infinity flag should be true after adding infinity to empty" + ); + } + #[test] fn deserialize_infinity_public_key() { PublicKey::deserialize(&bls::INFINITY_PUBLIC_KEY).unwrap_err(); diff --git a/crypto/eth2_key_derivation/Cargo.toml b/crypto/eth2_key_derivation/Cargo.toml index a893a9360dc..b8976b8ccb3 100644 --- a/crypto/eth2_key_derivation/Cargo.toml +++ b/crypto/eth2_key_derivation/Cargo.toml @@ -3,6 +3,7 @@ name = "eth2_key_derivation" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } +autotests = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -14,3 +15,7 @@ zeroize = { workspace = true } [dev-dependencies] hex = { workspace = true } + +[[test]] +name = "eth2_key_derivation_tests" +path = "tests/main.rs" diff --git a/crypto/eth2_key_derivation/tests/main.rs b/crypto/eth2_key_derivation/tests/main.rs new file mode 100644 index 00000000000..a239eaa6185 --- /dev/null +++ b/crypto/eth2_key_derivation/tests/main.rs @@ -0,0 +1,2 @@ +mod eip2333_vectors; +mod tests; diff --git a/crypto/eth2_keystore/Cargo.toml b/crypto/eth2_keystore/Cargo.toml index 61d2722efbd..290a10adc9a 100644 --- a/crypto/eth2_keystore/Cargo.toml +++ b/crypto/eth2_keystore/Cargo.toml @@ -3,6 +3,7 @@ name = "eth2_keystore" version = "0.1.0" authors = ["Pawan Dhananjay "] edition = { workspace = true } +autotests = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -24,3 +25,7 @@ zeroize = { workspace = true } [dev-dependencies] tempfile = { workspace = true } + +[[test]] +name = "eth2_keystore_tests" +path = "tests/main.rs" 
diff --git a/crypto/eth2_keystore/tests/main.rs b/crypto/eth2_keystore/tests/main.rs new file mode 100644 index 00000000000..79b31d5eda5 --- /dev/null +++ b/crypto/eth2_keystore/tests/main.rs @@ -0,0 +1,4 @@ +mod eip2335_vectors; +mod json; +mod params; +mod tests; diff --git a/crypto/eth2_wallet/Cargo.toml b/crypto/eth2_wallet/Cargo.toml index 5327bdc163b..0d454016a6b 100644 --- a/crypto/eth2_wallet/Cargo.toml +++ b/crypto/eth2_wallet/Cargo.toml @@ -3,6 +3,7 @@ name = "eth2_wallet" version = "0.1.0" authors = ["Paul Hauner "] edition = { workspace = true } +autotests = false # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] @@ -18,3 +19,7 @@ uuid = { workspace = true } [dev-dependencies] hex = { workspace = true } tempfile = { workspace = true } + +[[test]] +name = "eth2_wallet_tests" +path = "tests/main.rs" diff --git a/crypto/eth2_wallet/tests/main.rs b/crypto/eth2_wallet/tests/main.rs new file mode 100644 index 00000000000..d59ccff6392 --- /dev/null +++ b/crypto/eth2_wallet/tests/main.rs @@ -0,0 +1,3 @@ +mod eip2386_vectors; +mod json; +mod tests; diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index 432fcc1792e..5a36eb74f70 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" [dependencies] arbitrary = { workspace = true } c-kzg = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } ethereum_hashing = { workspace = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/crypto/kzg/src/kzg_commitment.rs b/crypto/kzg/src/kzg_commitment.rs index cfab09f63e7..5a5e689429e 100644 --- a/crypto/kzg/src/kzg_commitment.rs +++ b/crypto/kzg/src/kzg_commitment.rs @@ -1,5 +1,5 @@ use c_kzg::BYTES_PER_COMMITMENT; -use derivative::Derivative; +use educe::Educe; use ethereum_hashing::hash_fixed; use serde::de::{Deserialize, Deserializer}; use serde::ser::{Serialize, Serializer}; @@ -11,8 
+11,8 @@ use tree_hash::{Hash256, PackedEncoding, TreeHash}; pub const VERSIONED_HASH_VERSION_KZG: u8 = 0x01; -#[derive(Derivative, Clone, Copy, Encode, Decode)] -#[derivative(PartialEq, Eq, Hash)] +#[derive(Educe, Clone, Copy, Encode, Decode)] +#[educe(PartialEq, Eq, Hash)] #[ssz(struct_behaviour = "transparent")] pub struct KzgCommitment(pub [u8; c_kzg::BYTES_PER_COMMITMENT]); diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index 6bb7531493d..608400fa7ed 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -21,7 +21,7 @@ use store::{ errors::Error, metadata::{CURRENT_SCHEMA_VERSION, SchemaVersion}, }; -use strum::{EnumString, EnumVariantNames}; +use strum::{EnumString, VariantNames}; use tracing::{info, warn}; use types::{BeaconState, EthSpec, Slot}; @@ -80,7 +80,7 @@ pub fn display_db_version( } #[derive( - Debug, PartialEq, Eq, Clone, EnumString, Deserialize, Serialize, EnumVariantNames, ValueEnum, + Debug, PartialEq, Eq, Clone, EnumString, Deserialize, Serialize, VariantNames, ValueEnum, )] pub enum InspectTarget { #[strum(serialize = "sizes")] diff --git a/deny.toml b/deny.toml new file mode 100644 index 00000000000..398a173dfa4 --- /dev/null +++ b/deny.toml @@ -0,0 +1,23 @@ +# cargo-deny configuration for Lighthouse +# See https://embarkstudios.github.io/cargo-deny/ + +[bans] +# Allow multiple versions by default. Change this to "warn" to see all multiple versions. 
+multiple-versions = "allow" +deny = [ + { crate = "ethers", reason = "legacy Ethereum crate, use alloy instead" }, + { crate = "ethereum-types", reason = "legacy Ethereum crate, use alloy-primitives instead" }, + { crate = "protobuf", reason = "use quick-protobuf instead" }, + { crate = "derivative", reason = "use educe or derive_more instead" }, + { crate = "ark-ff", reason = "present in Cargo.lock but not needed by Lighthouse" }, + { crate = "strum", deny-multiple-versions = true, reason = "takes a long time to compile" }, + { crate = "reqwest", deny-multiple-versions = true, reason = "takes a long time to compile" } +] + +[sources] +unknown-registry = "deny" +unknown-git = "warn" +allow-registry = ["https://github.com/rust-lang/crates.io-index"] + +[sources.allow-org] +github = ["sigp"] diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 928b57f9bb5..43e361b60df 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "8.0.1" +version = { workspace = true } authors = ["Paul Hauner "] edition = { workspace = true } @@ -26,6 +26,7 @@ eth2_wallet = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } hex = { workspace = true } lighthouse_network = { workspace = true } lighthouse_version = { workspace = true } diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index ddd36e7e7a9..620539a95f1 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -1,4 +1,5 @@ use clap::ArgMatches; +use fixed_bytes::FixedBytesExtended; use lighthouse_network::{ NETWORK_KEY_FILENAME, NetworkConfig, discovery::{CombinedKey, ENR_FILENAME, build_enr}, @@ -9,7 +10,7 @@ use std::io::Write; use std::path::PathBuf; use std::{fs, net::Ipv4Addr}; use std::{fs::File, num::NonZeroU16}; -use types::{ChainSpec, 
EnrForkId, Epoch, EthSpec, FixedBytesExtended, Hash256}; +use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; @@ -43,7 +44,7 @@ pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), Str &enr_key, &config, &enr_fork_id, - None, + spec.custody_requirement, genesis_fork_digest, spec, ) diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs index 6f7dcdb5956..6a0eb2a0e1d 100644 --- a/lcli/src/http_sync.rs +++ b/lcli/src/http_sync.rs @@ -132,15 +132,14 @@ async fn get_block_from_source( let (kzg_proofs, blobs): (Vec<_>, Vec<_>) = blobs_from_source .iter() - .cloned() .map(|sidecar| (sidecar.kzg_proof, sidecar.blob.clone())) .unzip(); let block_root = block_from_source.canonical_root(); let block_contents = SignedBlockContents { signed_block: Arc::new(block_from_source), - kzg_proofs: kzg_proofs.into(), - blobs: blobs.into(), + kzg_proofs: kzg_proofs.try_into().unwrap(), + blobs: blobs.try_into().unwrap(), }; let publish_block_req = PublishBlockRequest::BlockContents(block_contents); diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index ee6485b2388..d6bdfb0d712 100644 --- a/lcli/src/mock_el.rs +++ b/lcli/src/mock_el.rs @@ -44,7 +44,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< amsterdam_time, }; let kzg = None; - let server: MockServer = MockServer::new_with_config(&handle, config, spec, kzg); + let server: MockServer = MockServer::new_with_config(&handle, config, kzg); if all_payloads_valid { eprintln!( diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index bde8aae0af3..ebe00c9be59 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "8.0.1" +version = { workspace = true } authors = ["Sigma Prime "] edition = { workspace = true } autotests = false diff --git a/lighthouse/tests/account_manager.rs 
b/lighthouse/tests/account_manager.rs index 0b945bcb2d4..9bfcae85e57 100644 --- a/lighthouse/tests/account_manager.rs +++ b/lighthouse/tests/account_manager.rs @@ -18,6 +18,7 @@ use account_utils::{ eth2_keystore::KeystoreBuilder, validator_definitions::{SigningDefinition, ValidatorDefinition, ValidatorDefinitions}, }; +use bls::{Keypair, PublicKey}; use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use std::env; use std::fs::{self, File}; @@ -26,7 +27,6 @@ use std::path::{Path, PathBuf}; use std::process::{Child, Command, Output, Stdio}; use std::str::from_utf8; use tempfile::{TempDir, tempdir}; -use types::{Keypair, PublicKey}; use validator_dir::ValidatorDir; use zeroize::Zeroizing; diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 8342b021738..207324ea33f 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -481,7 +481,12 @@ fn run_execution_jwt_secret_key_is_persisted() { .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); assert_eq!( - config.execution_endpoint.as_ref().unwrap().full.to_string(), + config + .execution_endpoint + .as_ref() + .unwrap() + .expose_full() + .to_string(), "http://localhost:8551/" ); let mut file_jwt_secret_key = String::new(); @@ -532,7 +537,12 @@ fn bellatrix_jwt_secrets_flag() { .with_config(|config| { let config = config.execution_layer.as_ref().unwrap(); assert_eq!( - config.execution_endpoint.as_ref().unwrap().full.to_string(), + config + .execution_endpoint + .as_ref() + .unwrap() + .expose_full() + .to_string(), "http://localhost:8551/" ); assert_eq!( diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index 398c6fbd6b2..ee3e910b369 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -109,12 +109,12 @@ fn beacon_nodes_flag() { .run() .with_config(|config| { assert_eq!( - config.beacon_nodes[0].full.to_string(), + 
config.beacon_nodes[0].expose_full().to_string(), "http://localhost:1001/" ); assert_eq!(config.beacon_nodes[0].to_string(), "http://localhost:1001/"); assert_eq!( - config.beacon_nodes[1].full.to_string(), + config.beacon_nodes[1].expose_full().to_string(), "https://project:secret@infura.io/" ); assert_eq!(config.beacon_nodes[1].to_string(), "https://infura.io/"); diff --git a/lighthouse/tests/validator_manager.rs b/lighthouse/tests/validator_manager.rs index 99afa7b6824..d6d720a561d 100644 --- a/lighthouse/tests/validator_manager.rs +++ b/lighthouse/tests/validator_manager.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use eth2::SensitiveUrl; use serde::de::DeserializeOwned; use std::fs; diff --git a/scripts/change_version.sh b/scripts/change_version.sh deleted file mode 100755 index bda87fd8633..00000000000 --- a/scripts/change_version.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# Change the version across multiple files, prior to a release. Use `sed` to -# find/replace the exiting version with the new one. -# -# Takes two arguments: -# -# 1. Current version (e.g., `0.2.6`) -# 2. 
New version (e.g., `0.2.7`) -# -# ## Example: -# -# `./change_version.sh 0.2.6 0.2.7` - -FROM=$1 -TO=$2 -VERSION_CRATE="../common/lighthouse_version/src/lib.rs" - -update_cargo_toml () { - echo $1 - sed -i -e "s/version = \"$FROM\"/version = \"$TO\"/g" $1 -} - -echo "Changing version from $FROM to $TO" - -update_cargo_toml ../account_manager/Cargo.toml -update_cargo_toml ../beacon_node/Cargo.toml -update_cargo_toml ../boot_node/Cargo.toml -update_cargo_toml ../lcli/Cargo.toml -update_cargo_toml ../lighthouse/Cargo.toml -update_cargo_toml ../validator_client/Cargo.toml - -echo $VERSION_CRATE -sed -i -e "s/$FROM/$TO/g" $VERSION_CRATE diff --git a/scripts/local_testnet/README.md b/scripts/local_testnet/README.md index 9d9844c4c41..6260f910192 100644 --- a/scripts/local_testnet/README.md +++ b/scripts/local_testnet/README.md @@ -21,7 +21,7 @@ cd ./scripts/local_testnet ``` It will build a Lighthouse docker image from the root of the directory and will take an approximately 12 minutes to complete. Once built, the testing will be started automatically. You will see a list of services running and "Started!" at the end. -You can also select your own Lighthouse docker image to use by specifying it in `network_params.yml` under the `cl_image` key. +You can also select your own Lighthouse docker image to use by specifying it in `network_params.yaml` under the `cl_image` key. Full configuration reference for Kurtosis is specified [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). To view all running services: diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index cdfacbced4b..a048674e630 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -1,19 +1,37 @@ # Full configuration reference [here](https://github.com/ethpandaops/ethereum-package?tab=readme-ov-file#configuration). 
participants: - - el_type: geth + - cl_type: lighthouse + cl_image: lighthouse:local + el_type: geth el_image: ethereum/client-go:latest - cl_type: lighthouse + supernode: true + cl_extra_params: + - --target-peers=3 + count: 2 + - cl_type: lighthouse cl_image: lighthouse:local + el_type: geth + el_image: ethereum/client-go:latest + supernode: false cl_extra_params: - --target-peers=3 - count: 4 + count: 2 network_params: - electra_fork_epoch: 0 - seconds_per_slot: 3 -global_log_level: debug + fulu_fork_epoch: 0 + seconds_per_slot: 6 snooper_enabled: false +global_log_level: debug additional_services: - dora - spamoor - prometheus_grafana - tempo +spamoor_params: + image: ethpandaops/spamoor:master + spammers: + - scenario: eoatx + config: + throughput: 200 + - scenario: blobs + config: + throughput: 20 \ No newline at end of file diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml deleted file mode 100644 index e3bc5131531..00000000000 --- a/scripts/local_testnet/network_params_das.yaml +++ /dev/null @@ -1,41 +0,0 @@ -participants: - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - supernode: true - cl_extra_params: - # Note: useful for testing range sync (only produce block if the node is in sync to prevent forking) - - --sync-tolerance-epochs=0 - - --target-peers=3 - count: 2 - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - supernode: false - cl_extra_params: - # Note: useful for testing range sync (only produce block if the node is in sync to prevent forking) - - --sync-tolerance-epochs=0 - - --target-peers=3 - count: 2 -network_params: - electra_fork_epoch: 0 - fulu_fork_epoch: 1 - seconds_per_slot: 6 -snooper_enabled: false -global_log_level: debug -additional_services: - - dora - - spamoor - - prometheus_grafana - - tempo -spamoor_params: - image: ethpandaops/spamoor:master - spammers: - - 
scenario: eoatx - config: - throughput: 200 - - scenario: blobs - config: - throughput: 20 \ No newline at end of file diff --git a/scripts/local_testnet/start_local_testnet.sh b/scripts/local_testnet/start_local_testnet.sh index 442e6fd98d9..8d8b33526d3 100755 --- a/scripts/local_testnet/start_local_testnet.sh +++ b/scripts/local_testnet/start_local_testnet.sh @@ -78,6 +78,11 @@ if [ "$RUN_ASSERTOOR_TESTS" = true ]; then echo "Assertoor has been added to $NETWORK_PARAMS_FILE." fi +if [ "$KEEP_ENCLAVE" = false ]; then + # Stop local testnet + kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true +fi + if [ "$BUILD_IMAGE" = true ]; then echo "Building Lighthouse Docker image." ROOT_DIR="$SCRIPT_DIR/../.." @@ -86,11 +91,6 @@ else echo "Not rebuilding Lighthouse Docker image." fi -if [ "$KEEP_ENCLAVE" = false ]; then - # Stop local testnet - kurtosis enclave rm -f $ENCLAVE_NAME 2>/dev/null || true -fi - kurtosis run --enclave $ENCLAVE_NAME github.com/ethpandaops/ethereum-package@$ETHEREUM_PKG_VERSION --args-file $NETWORK_PARAMS_FILE echo "Started!" diff --git a/scripts/tests/checkpoint-sync-config-devnet.yaml b/scripts/tests/checkpoint-sync-config-devnet.yaml deleted file mode 100644 index 2392011ed33..00000000000 --- a/scripts/tests/checkpoint-sync-config-devnet.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# Kurtosis config file to checkpoint sync to a running devnet supported by ethPandaOps and `ethereum-package`. 
-participants: - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - cl_extra_params: - - --disable-backfill-rate-limiting - supernode: true - - cl_type: lighthouse - cl_image: lighthouse:local - el_type: geth - el_image: ethpandaops/geth:master - cl_extra_params: - - --disable-backfill-rate-limiting - supernode: false - -checkpoint_sync_enabled: true -checkpoint_sync_url: "https://checkpoint-sync.fusaka-devnet-3.ethpandaops.io" - -global_log_level: debug - -network_params: - network: fusaka-devnet-3 diff --git a/scripts/tests/genesis-sync-config-electra.yaml b/scripts/tests/genesis-sync-config-electra.yaml index 153f754c94a..1d1ed4d3152 100644 --- a/scripts/tests/genesis-sync-config-electra.yaml +++ b/scripts/tests/genesis-sync-config-electra.yaml @@ -6,15 +6,14 @@ participants: # nodes without validators, used for testing sync. - cl_type: lighthouse cl_image: lighthouse:local - supernode: true # no supernode in Electra, this is for future proof validator_count: 0 - cl_type: lighthouse cl_image: lighthouse:local - supernode: false validator_count: 0 network_params: seconds_per_slot: 6 electra_fork_epoch: 0 + fulu_fork_epoch: 100000 # a really big number so this test stays in electra preset: "minimal" additional_services: - tx_fuzz diff --git a/scripts/tests/genesis-sync-config-fulu.yaml b/scripts/tests/genesis-sync-config-fulu.yaml index 98dc8751d62..6d2c2647a90 100644 --- a/scripts/tests/genesis-sync-config-fulu.yaml +++ b/scripts/tests/genesis-sync-config-fulu.yaml @@ -21,8 +21,7 @@ participants: validator_count: 0 network_params: seconds_per_slot: 6 - electra_fork_epoch: 0 - fulu_fork_epoch: 1 + fulu_fork_epoch: 0 preset: "minimal" additional_services: - tx_fuzz diff --git a/scripts/tests/network_params.yaml b/scripts/tests/network_params.yaml index 0fda1aa34ba..35916ac1e4e 100644 --- a/scripts/tests/network_params.yaml +++ b/scripts/tests/network_params.yaml @@ -6,9 +6,10 @@ participants: cl_image: 
lighthouse:local cl_extra_params: - --target-peers=3 + supernode: true count: 4 network_params: - electra_fork_epoch: 0 + fulu_fork_epoch: 0 seconds_per_slot: 3 num_validator_keys_per_node: 20 global_log_level: debug diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml index b2f6eca9c37..a068b2e8856 100644 --- a/slasher/Cargo.toml +++ b/slasher/Cargo.toml @@ -3,6 +3,7 @@ name = "slasher" version = "0.1.0" authors = ["Michael Sproul "] edition = { workspace = true } +autotests = false [features] default = ["lmdb"] @@ -13,11 +14,13 @@ portable = ["types/portable"] [dependencies] bincode = { workspace = true } +bls = { workspace = true } byteorder = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } flate2 = { version = "1.0.14", features = ["zlib"], default-features = false } lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true } @@ -37,9 +40,14 @@ strum = { workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } [dev-dependencies] maplit = { workspace = true } rayon = { workspace = true } tempfile = { workspace = true } + +[[test]] +name = "slasher_tests" +path = "tests/main.rs" diff --git a/slasher/src/attester_record.rs b/slasher/src/attester_record.rs index 67145193acc..db326a9d80b 100644 --- a/slasher/src/attester_record.rs +++ b/slasher/src/attester_record.rs @@ -1,5 +1,7 @@ use crate::{Error, database::IndexedAttestationId}; +use bls::AggregateSignature; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use std::borrow::Cow; use std::sync::{ Arc, @@ 
-7,7 +9,7 @@ use std::sync::{ }; use tree_hash::TreeHash as _; use tree_hash_derive::TreeHash; -use types::{AggregateSignature, EthSpec, Hash256, IndexedAttestation, VariableList}; +use types::{EthSpec, Hash256, IndexedAttestation}; #[derive(Debug, Clone, Copy)] pub struct AttesterRecord { diff --git a/slasher/src/config.rs b/slasher/src/config.rs index a8194bed499..144016efd24 100644 --- a/slasher/src/config.rs +++ b/slasher/src/config.rs @@ -2,7 +2,7 @@ use crate::Error; use serde::{Deserialize, Serialize}; use std::num::NonZeroUsize; use std::path::PathBuf; -use strum::{Display, EnumString, EnumVariantNames}; +use strum::{Display, EnumString, VariantNames}; use types::non_zero_usize::new_non_zero_usize; use types::{Epoch, EthSpec, IndexedAttestation}; @@ -59,7 +59,7 @@ pub struct DiskConfig { } #[derive( - Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Display, EnumString, EnumVariantNames, + Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Display, EnumString, VariantNames, )] #[strum(serialize_all = "lowercase")] pub enum DatabaseBackend { diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 2df2849612e..80d073a81c6 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -7,6 +7,7 @@ use crate::{ AttesterRecord, AttesterSlashingStatus, CompactAttesterRecord, Config, Database, Error, ProposerSlashingStatus, metrics, }; +use bls::AggregateSignature; use byteorder::{BigEndian, ByteOrder}; use interface::{Environment, OpenDatabases, RwTransaction}; use lru::LruCache; @@ -14,15 +15,16 @@ use parking_lot::Mutex; use serde::de::DeserializeOwned; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; +use ssz_types::VariableList; use std::borrow::{Borrow, Cow}; use std::marker::PhantomData; use std::sync::Arc; use tracing::info; use tree_hash::TreeHash; use types::{ - AggregateSignature, AttestationData, ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, + AttestationData, ChainSpec, Epoch, EthSpec, Hash256, 
IndexedAttestation, IndexedAttestationBase, IndexedAttestationElectra, ProposerSlashing, SignedBeaconBlockHeader, - Slot, VariableList, + Slot, }; /// Current database schema version, to check compatibility of on-disk DB with software. @@ -860,7 +862,8 @@ impl SlasherDB { #[cfg(test)] mod test { use super::*; - use types::{Checkpoint, ForkName, MainnetEthSpec, Unsigned}; + use typenum::Unsigned; + use types::{Checkpoint, ForkName, MainnetEthSpec}; type E = MainnetEthSpec; diff --git a/slasher/src/database/redb_impl.rs b/slasher/src/database/redb_impl.rs index 4198e826455..570d7df1318 100644 --- a/slasher/src/database/redb_impl.rs +++ b/slasher/src/database/redb_impl.rs @@ -7,7 +7,7 @@ use crate::{ *, }, }; -use derivative::Derivative; +use educe::Educe; use redb::{ReadableTable, TableDefinition}; use std::{borrow::Cow, path::PathBuf}; @@ -23,18 +23,18 @@ pub struct Database<'env> { _phantom: PhantomData<&'env ()>, } -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct RwTransaction<'env> { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] txn: redb::WriteTransaction, _phantom: PhantomData<&'env ()>, } -#[derive(Derivative)] -#[derivative(Debug)] +#[derive(Educe)] +#[educe(Debug)] pub struct Cursor<'env> { - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] txn: &'env redb::WriteTransaction, db: &'env Database<'env>, current_key: Option>, diff --git a/slasher/src/test_utils.rs b/slasher/src/test_utils.rs index 26338a019a2..20d1ee92175 100644 --- a/slasher/src/test_utils.rs +++ b/slasher/src/test_utils.rs @@ -1,10 +1,11 @@ +use bls::{AggregateSignature, Signature}; +use fixed_bytes::FixedBytesExtended; use std::collections::HashSet; use std::sync::Arc; use types::{ - AggregateSignature, AttestationData, AttesterSlashing, AttesterSlashingBase, - AttesterSlashingElectra, BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, - FixedBytesExtended, Hash256, IndexedAttestation, MainnetEthSpec, Signature, - 
SignedBeaconBlockHeader, Slot, + AttestationData, AttesterSlashing, AttesterSlashingBase, AttesterSlashingElectra, + BeaconBlockHeader, ChainSpec, Checkpoint, Epoch, EthSpec, Hash256, IndexedAttestation, + MainnetEthSpec, SignedBeaconBlockHeader, Slot, indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectra}, }; @@ -17,7 +18,7 @@ pub fn indexed_att_electra( target_root: u64, ) -> IndexedAttestation { IndexedAttestation::Electra(IndexedAttestationElectra { - attesting_indices: attesting_indices.as_ref().to_vec().into(), + attesting_indices: attesting_indices.as_ref().to_vec().try_into().unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, @@ -42,7 +43,7 @@ pub fn indexed_att( target_root: u64, ) -> IndexedAttestation { IndexedAttestation::Base(IndexedAttestationBase { - attesting_indices: attesting_indices.as_ref().to_vec().into(), + attesting_indices: attesting_indices.as_ref().to_vec().try_into().unwrap(), data: AttestationData { slot: Slot::new(0), index: 0, diff --git a/slasher/tests/main.rs b/slasher/tests/main.rs new file mode 100644 index 00000000000..fb78dcb917d --- /dev/null +++ b/slasher/tests/main.rs @@ -0,0 +1,5 @@ +mod attester_slashings; +mod backend; +mod proposer_slashings; +mod random; +mod wrap_around; diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml index 50007f91602..cef201ee91d 100644 --- a/testing/ef_tests/Cargo.toml +++ b/testing/ef_tests/Cargo.toml @@ -16,9 +16,8 @@ alloy-primitives = { workspace = true } beacon_chain = { workspace = true } bls = { workspace = true } compare_fields = { workspace = true } -compare_fields_derive = { workspace = true } context_deserialize = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } eth2_network_config = { workspace = true } ethereum_ssz = { workspace = true } ethereum_ssz_derive = { workspace = true } @@ -28,14 +27,17 @@ fs2 = { workspace = true } hex = { workspace = true } kzg = { workspace = true } logging = { 
workspace = true } +milhouse = { workspace = true } rayon = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } serde_repr = { workspace = true } serde_yaml = { workspace = true } snap = { workspace = true } +ssz_types = { workspace = true } state_processing = { workspace = true } swap_or_not_shuffle = { workspace = true } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index da8640d681a..0ead9d00472 100644 --- a/testing/ef_tests/Makefile +++ b/testing/ef_tests/Makefile @@ -1,6 +1,6 @@ # To download/extract nightly tests, run: # CONSENSUS_SPECS_TEST_VERSION=nightly make -CONSENSUS_SPECS_TEST_VERSION ?= v1.6.0-alpha.6 +CONSENSUS_SPECS_TEST_VERSION ?= v1.6.0-beta.1 REPO_NAME := consensus-spec-tests OUTPUT_DIR := ./$(REPO_NAME) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 41e3c4bff70..1f70881a887 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -54,6 +54,8 @@ "tests/general/phase0/ssz_generic/basic_progressive_list", "tests/general/phase0/ssz_generic/containers/.*/ProgressiveBitsStruct.*", "tests/general/phase0/ssz_generic/containers/.*/ProgressiveTestStruct.*", + "tests/general/phase0/ssz_generic/progressive_containers/.*", + "tests/general/phase0/ssz_generic/compatible_unions/.*", # Ignore full epoch tests for now (just test the sub-transitions). "tests/.*/.*/epoch_processing/.*/pre_epoch.ssz_snappy", "tests/.*/.*/epoch_processing/.*/post_epoch.ssz_snappy", diff --git a/testing/ef_tests/download_test_vectors.sh b/testing/ef_tests/download_test_vectors.sh index 7297f7eeb85..21f74e817f5 100755 --- a/testing/ef_tests/download_test_vectors.sh +++ b/testing/ef_tests/download_test_vectors.sh @@ -57,7 +57,7 @@ else if [[ ! 
-e "${test}.tar.gz" ]]; then echo "Downloading: ${version}/${test}.tar.gz" curl --progress-bar --location --remote-name --show-error --retry 3 --retry-all-errors --fail \ - "https://github.com/ethereum/consensus-spec-tests/releases/download/${version}/${test}.tar.gz" \ + "https://github.com/ethereum/consensus-specs/releases/download/${version}/${test}.tar.gz" \ || { echo "Curl failed. Aborting" rm -f "${test}.tar.gz" diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 1103d2fe822..52f5333df1a 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -1,11 +1,12 @@ use super::*; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; use serde::Deserialize; +use ssz_types::FixedVector; use tree_hash::Hash256; +use typenum::Unsigned; use types::{ BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, - BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconState, FixedVector, FullPayload, Unsigned, - light_client_update, + BeaconBlockBodyFulu, BeaconBlockBodyGloas, BeaconState, FullPayload, light_client_update, }; #[derive(Debug, Clone, Deserialize)] diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 379fcb1bb4d..a53bce927cb 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -307,6 +307,7 @@ impl Operation for BeaconBlockBody> { ForkName::Deneb => BeaconBlockBody::Deneb(<_>::from_ssz_bytes(bytes)?), ForkName::Electra => BeaconBlockBody::Electra(<_>::from_ssz_bytes(bytes)?), ForkName::Fulu => BeaconBlockBody::Fulu(<_>::from_ssz_bytes(bytes)?), + // TODO(EIP-7732): See if we need to handle Gloas here _ => panic!(), }) }) @@ -366,6 +367,7 @@ impl Operation for BeaconBlockBody> { let inner = >>::from_ssz_bytes(bytes)?; BeaconBlockBody::Fulu(inner.clone_as_blinded()) } + // 
TODO(EIP-7732): See if we need to handle Gloas here _ => panic!(), }) }) @@ -417,6 +419,7 @@ impl Operation for WithdrawalsPayload { spec: &ChainSpec, _: &Operations, ) -> Result<(), BlockProcessingError> { + // TODO(EIP-7732): implement separate gloas and non-gloas variants of process_withdrawals process_withdrawals::<_, FullPayload<_>>(state, self.payload.to_ref(), spec) } } diff --git a/testing/ef_tests/src/cases/rewards.rs b/testing/ef_tests/src/cases/rewards.rs index d6ce8be7428..798014a6b06 100644 --- a/testing/ef_tests/src/cases/rewards.rs +++ b/testing/ef_tests/src/cases/rewards.rs @@ -1,7 +1,7 @@ use super::*; use crate::case_result::compare_result_detailed; use crate::decode::{ssz_decode_file, ssz_decode_state, yaml_decode_file}; -use compare_fields_derive::CompareFields; +use compare_fields::CompareFields; use serde::Deserialize; use ssz::four_byte_option_impl; use ssz_derive::{Decode, Encode}; diff --git a/testing/ef_tests/src/cases/ssz_generic.rs b/testing/ef_tests/src/cases/ssz_generic.rs index 4152711aee7..1dd37a22eed 100644 --- a/testing/ef_tests/src/cases/ssz_generic.rs +++ b/testing/ef_tests/src/cases/ssz_generic.rs @@ -5,12 +5,14 @@ use crate::cases::common::{DecimalU128, DecimalU256, SszStaticType}; use crate::cases::ssz_static::{check_serialization, check_tree_hash}; use crate::decode::{context_yaml_decode_file, log_file_access, snappy_decode_file}; use context_deserialize::{ContextDeserialize, context_deserialize}; +use milhouse::Vector; use serde::{Deserialize, Deserializer, de::Error as SerdeError}; use ssz_derive::{Decode, Encode}; +use ssz_types::{BitList, BitVector, FixedVector, VariableList}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; -use types::typenum::*; -use types::{BitList, BitVector, FixedVector, ForkName, VariableList, Vector}; +use typenum::*; +use types::ForkName; #[derive(Debug, Clone, Deserialize)] #[context_deserialize(ForkName)] @@ -318,14 +320,13 @@ where { let s: String = 
serde::de::Deserialize::deserialize(deserializer)?; let decoded: Vec = hex::decode(&s.as_str()[2..]).map_err(D::Error::custom)?; + let decoded_len = decoded.len(); - if decoded.len() > N::to_usize() { - Err(D::Error::custom(format!( + decoded.try_into().map_err(|_| { + D::Error::custom(format!( "Too many values for list, got: {}, limit: {}", - decoded.len(), + decoded_len, N::to_usize() - ))) - } else { - Ok(decoded.into()) - } + )) + }) } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index c31a75c3352..a5b2ffada37 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -2,7 +2,7 @@ use crate::cases::{self, Case, Cases, EpochTransition, LoadCase, Operation}; use crate::type_name::TypeName; use crate::{FeatureName, type_name}; use context_deserialize::ContextDeserialize; -use derivative::Derivative; +use educe::Educe; use std::fs::{self, DirEntry}; use std::marker::PhantomData; use std::path::PathBuf; @@ -154,8 +154,8 @@ pub trait Handler { macro_rules! bls_eth_handler { ($runner_name: ident, $case_name:ident, $handler_name:expr) => { - #[derive(Derivative)] - #[derivative(Default(bound = ""))] + #[derive(Educe)] + #[educe(Default)] pub struct $runner_name; impl Handler for $runner_name { @@ -174,8 +174,8 @@ macro_rules! bls_eth_handler { macro_rules! bls_handler { ($runner_name: ident, $case_name:ident, $handler_name:expr) => { - #[derive(Derivative)] - #[derivative(Default(bound = ""))] + #[derive(Educe)] + #[educe(Default)] pub struct $runner_name; impl Handler for $runner_name { @@ -335,8 +335,8 @@ impl SszStaticHandler { } /// Handler for SSZ types that implement `CachedTreeHash`. -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct SszStaticTHCHandler(PhantomData<(T, E)>); /// Handler for SSZ types that don't implement `ssz::Decode`. 
@@ -436,8 +436,8 @@ where } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct ShufflingHandler(PhantomData); impl Handler for ShufflingHandler { @@ -460,8 +460,8 @@ impl Handler for ShufflingHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct SanityBlocksHandler(PhantomData); impl Handler for SanityBlocksHandler { @@ -486,8 +486,8 @@ impl Handler for SanityBlocksHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct SanitySlotsHandler(PhantomData); impl Handler for SanitySlotsHandler { @@ -511,8 +511,8 @@ impl Handler for SanitySlotsHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct RandomHandler(PhantomData); impl Handler for RandomHandler { @@ -531,8 +531,8 @@ impl Handler for RandomHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct EpochProcessingHandler(PhantomData<(E, T)>); impl> Handler for EpochProcessingHandler { @@ -581,8 +581,8 @@ impl Handler for RewardsHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct ForkHandler(PhantomData); impl Handler for ForkHandler { @@ -601,8 +601,8 @@ impl Handler for ForkHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct TransitionHandler(PhantomData); impl Handler for TransitionHandler { @@ -621,8 +621,8 @@ impl Handler for TransitionHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct FinalityHandler(PhantomData); impl Handler for FinalityHandler { @@ -705,8 +705,8 @@ impl Handler for ForkChoiceHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] 
+#[educe(Default)] pub struct OptimisticSyncHandler(PhantomData); impl Handler for OptimisticSyncHandler { @@ -734,8 +734,8 @@ impl Handler for OptimisticSyncHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct GenesisValidityHandler(PhantomData); impl Handler for GenesisValidityHandler { @@ -754,8 +754,8 @@ impl Handler for GenesisValidityHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct GenesisInitializationHandler(PhantomData); impl Handler for GenesisInitializationHandler { @@ -774,8 +774,8 @@ impl Handler for GenesisInitializationHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGBlobToKZGCommitmentHandler(PhantomData); impl Handler for KZGBlobToKZGCommitmentHandler { @@ -794,8 +794,8 @@ impl Handler for KZGBlobToKZGCommitmentHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGComputeBlobKZGProofHandler(PhantomData); impl Handler for KZGComputeBlobKZGProofHandler { @@ -814,8 +814,8 @@ impl Handler for KZGComputeBlobKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGComputeKZGProofHandler(PhantomData); impl Handler for KZGComputeKZGProofHandler { @@ -834,8 +834,8 @@ impl Handler for KZGComputeKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGVerifyBlobKZGProofHandler(PhantomData); impl Handler for KZGVerifyBlobKZGProofHandler { @@ -854,8 +854,8 @@ impl Handler for KZGVerifyBlobKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGVerifyBlobKZGProofBatchHandler(PhantomData); impl Handler for KZGVerifyBlobKZGProofBatchHandler { @@ -874,8 +874,8 @@ 
impl Handler for KZGVerifyBlobKZGProofBatchHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGVerifyKZGProofHandler(PhantomData); impl Handler for KZGVerifyKZGProofHandler { @@ -894,8 +894,8 @@ impl Handler for KZGVerifyKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct GetCustodyGroupsHandler(PhantomData); impl Handler for GetCustodyGroupsHandler { @@ -914,8 +914,8 @@ impl Handler for GetCustodyGroupsHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct ComputeColumnsForCustodyGroupHandler(PhantomData); impl Handler for ComputeColumnsForCustodyGroupHandler { @@ -934,8 +934,8 @@ impl Handler for ComputeColumnsForCustodyGroupHandler } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGComputeCellsHandler(PhantomData); impl Handler for KZGComputeCellsHandler { @@ -954,8 +954,8 @@ impl Handler for KZGComputeCellsHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGComputeCellsAndKZGProofHandler(PhantomData); impl Handler for KZGComputeCellsAndKZGProofHandler { @@ -974,8 +974,8 @@ impl Handler for KZGComputeCellsAndKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGVerifyCellKZGProofBatchHandler(PhantomData); impl Handler for KZGVerifyCellKZGProofBatchHandler { @@ -994,8 +994,8 @@ impl Handler for KZGVerifyCellKZGProofBatchHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KZGRecoverCellsAndKZGProofHandler(PhantomData); impl Handler for KZGRecoverCellsAndKZGProofHandler { @@ -1014,8 +1014,8 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { } } -#[derive(Derivative)] 
-#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct KzgInclusionMerkleProofValidityHandler(PhantomData); impl Handler for KzgInclusionMerkleProofValidityHandler { @@ -1038,8 +1038,8 @@ impl Handler for KzgInclusionMerkleProofValidityHandler(PhantomData); impl Handler for MerkleProofValidityHandler { @@ -1062,8 +1062,8 @@ impl Handler for MerkleProofValidityHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct LightClientUpdateHandler(PhantomData); impl Handler for LightClientUpdateHandler { @@ -1083,13 +1083,12 @@ impl Handler for LightClientUpdateHandler { fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { // Enabled in Altair - // No test in Fulu yet. - fork_name.altair_enabled() && fork_name != ForkName::Fulu + fork_name.altair_enabled() } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct OperationsHandler(PhantomData<(E, O)>); impl> Handler for OperationsHandler { @@ -1108,8 +1107,8 @@ impl> Handler for OperationsHandler } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] +#[derive(Educe)] +#[educe(Default)] pub struct SszGenericHandler(PhantomData); impl Handler for SszGenericHandler { diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 089e4464cd7..0cec69c97e5 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -1,6 +1,7 @@ #![cfg(feature = "ef_tests")] use ef_tests::*; +use typenum::Unsigned; use types::*; // Check that the hand-computed multiplications on EthSpec are correctly computed. 
diff --git a/testing/execution_engine_integration/Cargo.toml b/testing/execution_engine_integration/Cargo.toml index eef13cfc738..034b6c5c8a0 100644 --- a/testing/execution_engine_integration/Cargo.toml +++ b/testing/execution_engine_integration/Cargo.toml @@ -7,13 +7,16 @@ edition = { workspace = true } portable = ["types/portable"] [dependencies] +alloy-network = { workspace = true } +alloy-primitives = { workspace = true } +alloy-provider = { workspace = true } +alloy-rpc-types-eth = { workspace = true } +alloy-signer-local = { workspace = true } async-channel = { workspace = true } +bls = { workspace = true } deposit_contract = { workspace = true } -ethers-core = { workspace = true } -ethers-middleware = { workspace = true } -ethers-providers = { workspace = true } -ethers-signers = { workspace = true } execution_layer = { workspace = true } +fixed_bytes = { workspace = true } fork_choice = { workspace = true } futures = { workspace = true } hex = { workspace = true } @@ -25,4 +28,5 @@ serde_json = { workspace = true } task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/execution_engine_integration/src/execution_engine.rs b/testing/execution_engine_integration/src/execution_engine.rs index ed4ee4682f4..3bb8585e448 100644 --- a/testing/execution_engine_integration/src/execution_engine.rs +++ b/testing/execution_engine_integration/src/execution_engine.rs @@ -1,6 +1,7 @@ -use ethers_providers::{Http, Provider}; +use alloy_provider::ProviderBuilder; use execution_layer::DEFAULT_JWT_FILE; use network_utils::unused_port::unused_tcp4_port; +use reqwest::Url; use sensitive_url::SensitiveUrl; use std::path::PathBuf; use std::process::Child; @@ -34,7 +35,7 @@ pub struct ExecutionEngine { http_port: u16, http_auth_port: u16, child: Child, - pub provider: Provider, + pub provider: Box, } impl Drop for ExecutionEngine { @@ -53,8 +54,9 @@ impl 
ExecutionEngine { let http_port = unused_tcp4_port().unwrap(); let http_auth_port = unused_tcp4_port().unwrap(); let child = E::start_client(&datadir, http_port, http_auth_port, jwt_secret_path); - let provider = Provider::::try_from(format!("http://localhost:{}", http_port)) - .expect("failed to instantiate ethers provider"); + let provider = Box::new(ProviderBuilder::new().connect_http( + Url::parse(&format!("http://localhost:{}", http_port)).expect("failed to parse URL"), + )); Self { engine, datadir, diff --git a/testing/execution_engine_integration/src/test_rig.rs b/testing/execution_engine_integration/src/test_rig.rs index 05ec0a2f191..8413da4c5ee 100644 --- a/testing/execution_engine_integration/src/test_rig.rs +++ b/testing/execution_engine_integration/src/test_rig.rs @@ -2,14 +2,17 @@ use crate::execution_engine::{ ACCOUNT1, ACCOUNT2, ExecutionEngine, GenericExecutionEngine, KEYSTORE_PASSWORD, PRIVATE_KEYS, }; use crate::transactions::transactions; -use ethers_middleware::SignerMiddleware; -use ethers_providers::Middleware; -use ethers_signers::LocalWallet; +use alloy_network::{EthereumWallet, TransactionBuilder}; +use alloy_primitives::Address as AlloyAddress; +use alloy_provider::{Provider, ProviderBuilder}; +use alloy_signer_local::PrivateKeySigner; +use bls::PublicKeyBytes; use execution_layer::test_utils::DEFAULT_GAS_LIMIT; use execution_layer::{ BlockProposalContentsType, BuilderParams, ChainHealth, ExecutionLayer, PayloadAttributes, PayloadParameters, PayloadStatus, }; +use fixed_bytes::FixedBytesExtended; use fork_choice::ForkchoiceUpdateParameters; use reqwest::{Client, header::CONTENT_TYPE}; use sensitive_url::SensitiveUrl; @@ -21,8 +24,9 @@ use tokio::time::sleep; use types::payload::BlockProductionVersion; use types::{ Address, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - FixedBytesExtended, ForkName, Hash256, MainnetEthSpec, PublicKeyBytes, Slot, Uint256, + ForkName, Hash256, MainnetEthSpec, Slot, 
Uint256, }; + const EXECUTION_ENGINE_START_TIMEOUT: Duration = Duration::from_secs(60); const TEST_FORK: ForkName = ForkName::Capella; @@ -64,7 +68,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: let client = Client::builder().build().unwrap(); let request = client - .post(http_url.full.clone()) + .post(http_url.expose_full().clone()) .header(CONTENT_TYPE, "application/json") .json(&body); @@ -90,7 +94,7 @@ async fn import_and_unlock(http_url: SensitiveUrl, priv_keys: &[&str], password: ); let request = client - .post(http_url.full.clone()) + .post(http_url.expose_full().clone()) .header(CONTENT_TYPE, "application/json") .json(&body); @@ -202,12 +206,13 @@ impl TestRig { self.wait_until_synced().await; // Create a local signer in case we need to sign transactions locally - let wallet1: LocalWallet = PRIVATE_KEYS[0].parse().expect("Invalid private key"); - let signer = SignerMiddleware::new(&self.ee_a.execution_engine.provider, wallet1); + let private_key_signer: PrivateKeySigner = + PRIVATE_KEYS[0].parse().expect("Invalid private key"); + let wallet = EthereumWallet::from(private_key_signer); // We hardcode the accounts here since some EEs start with a default unlocked account - let account1 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT1).unwrap()); - let account2 = ethers_core::types::Address::from_slice(&hex::decode(ACCOUNT2).unwrap()); + let account1 = AlloyAddress::from_slice(&hex::decode(ACCOUNT1).unwrap()); + let account2 = AlloyAddress::from_slice(&hex::decode(ACCOUNT2).unwrap()); /* * Read the terminal block hash from both pairs, check it's equal. 
@@ -237,11 +242,18 @@ impl TestRig { if self.use_local_signing { // Sign locally with the Signer middleware - for (i, tx) in txs.clone().into_iter().enumerate() { + for (i, mut tx) in txs.clone().into_iter().enumerate() { // The local signer uses eth_sendRawTransaction, so we need to manually set the nonce - let mut tx = tx.clone(); - tx.set_nonce(i as u64); - let pending_tx = signer.send_transaction(tx, None).await.unwrap(); + tx = tx.with_nonce(i as u64); + let wallet_provider = ProviderBuilder::new().wallet(wallet.clone()).connect_http( + self.ee_a + .execution_engine + .http_url() + .to_string() + .parse() + .unwrap(), + ); + let pending_tx = wallet_provider.send_transaction(tx).await.unwrap(); pending_txs.push(pending_tx); } } else { @@ -261,7 +273,7 @@ impl TestRig { .ee_a .execution_engine .provider - .send_transaction(tx, None) + .send_transaction(tx) .await .unwrap(); pending_txs.push(pending_tx); @@ -446,11 +458,10 @@ impl TestRig { // Verify that all submitted txs were successful for pending_tx in pending_txs { - let tx_receipt = pending_tx.await.unwrap().unwrap(); - assert_eq!( - tx_receipt.status, - Some(1.into()), - "Tx index {} has invalid status ", + let tx_receipt = pending_tx.get_receipt().await.unwrap(); + assert!( + tx_receipt.status(), + "Tx index {:?} has invalid status ", tx_receipt.transaction_index ); } diff --git a/testing/execution_engine_integration/src/transactions.rs b/testing/execution_engine_integration/src/transactions.rs index b6111426b67..8cd63ce307a 100644 --- a/testing/execution_engine_integration/src/transactions.rs +++ b/testing/execution_engine_integration/src/transactions.rs @@ -1,9 +1,10 @@ +use alloy_network::TransactionBuilder; +use alloy_primitives::{Address, U256}; +use alloy_rpc_types_eth::{AccessList, TransactionRequest}; +use bls::{Keypair, Signature}; use deposit_contract::{BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, encode_eth1_tx_data}; -use ethers_core::types::{ - Address, Bytes, Eip1559TransactionRequest, 
TransactionRequest, U256, - transaction::{eip2718::TypedTransaction, eip2930::AccessList}, -}; -use types::{DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, Signature}; +use fixed_bytes::FixedBytesExtended; +use types::{DepositData, EthSpec, Hash256}; /// Hardcoded deposit contract address based on sender address and nonce pub const DEPOSIT_CONTRACT_ADDRESS: &str = "64f43BEc7F86526686C931d65362bB8698872F90"; @@ -21,7 +22,7 @@ pub enum Transaction { } /// Get a list of transactions to publish to the execution layer. -pub fn transactions(account1: Address, account2: Address) -> Vec { +pub fn transactions(account1: Address, account2: Address) -> Vec { vec![ Transaction::Transfer(account1, account2).transaction::(), Transaction::TransferLegacy(account1, account2).transaction::(), @@ -29,7 +30,7 @@ pub fn transactions(account1: Address, account2: Address) -> Vec(), Transaction::DepositDepositContract { sender: account1, - deposit_contract_address: ethers_core::types::Address::from_slice( + deposit_contract_address: Address::from_slice( &hex::decode(DEPOSIT_CONTRACT_ADDRESS).unwrap(), ), } @@ -38,33 +39,36 @@ pub fn transactions(account1: Address, account2: Address) -> Vec(&self) -> TypedTransaction { + pub fn transaction(&self) -> TransactionRequest { match &self { - Self::TransferLegacy(from, to) => TransactionRequest::new() + Self::TransferLegacy(from, to) => TransactionRequest::default() .from(*from) .to(*to) - .value(1) - .into(), - Self::Transfer(from, to) => Eip1559TransactionRequest::new() + .value(U256::from(1)) + .with_gas_price(1_000_000_000u128), // 1 gwei + Self::Transfer(from, to) => TransactionRequest::default() .from(*from) .to(*to) - .value(1) - .into(), - Self::TransferAccessList(from, to) => TransactionRequest::new() + .value(U256::from(1)) + .with_max_fee_per_gas(2_000_000_000u128) + .with_max_priority_fee_per_gas(1_000_000_000u128), + Self::TransferAccessList(from, to) => TransactionRequest::default() .from(*from) .to(*to) - .value(1) + 
.value(U256::from(1)) .with_access_list(AccessList::default()) - .into(), + .with_gas_price(1_000_000_000u128), // 1 gwei Self::DeployDepositContract(addr) => { let mut bytecode = String::from_utf8(BYTECODE.to_vec()).unwrap(); bytecode.retain(|c| c.is_ascii_hexdigit()); let bytecode = hex::decode(&bytecode[1..]).unwrap(); - TransactionRequest::new() + let mut req = TransactionRequest::default() .from(*addr) - .data(Bytes::from(bytecode)) - .gas(CONTRACT_DEPLOY_GAS) - .into() + .with_input(bytecode) + .with_gas_limit(CONTRACT_DEPLOY_GAS.try_into().unwrap()) + .with_gas_price(1_000_000_000u128); // 1 gwei + req.set_create(); + req } Self::DepositDepositContract { sender, @@ -80,13 +84,13 @@ impl Transaction { signature: Signature::empty().into(), }; deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - TransactionRequest::new() + TransactionRequest::default() .from(*sender) .to(*deposit_contract_address) - .data(Bytes::from(encode_eth1_tx_data(&deposit).unwrap())) - .gas(DEPOSIT_GAS) - .value(U256::from(amount) * U256::exp10(9)) - .into() + .with_input(encode_eth1_tx_data(&deposit).unwrap()) + .with_gas_limit(DEPOSIT_GAS.try_into().unwrap()) + .value(U256::from(amount) * U256::from(10).pow(U256::from(9))) + .with_gas_price(1_000_000_000u128) // 1 gwei } } } diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index df191ed5af7..e49d11ee1eb 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -248,14 +248,8 @@ impl LocalExecutionNode { if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) { panic!("Failed to write jwt file {}", e); } - let spec = context.eth2_config.spec.clone(); Self { - server: MockServer::new_with_config( - &context.executor.handle().unwrap(), - config, - spec, - None, - ), + server: MockServer::new_with_config(&context.executor.handle().unwrap(), config, None), datadir, } } diff --git a/testing/simulator/Cargo.toml 
b/testing/simulator/Cargo.toml index cd23138a1cc..a1b1b6f95d2 100644 --- a/testing/simulator/Cargo.toml +++ b/testing/simulator/Cargo.toml @@ -15,9 +15,10 @@ logging = { workspace = true } node_test_rig = { path = "../node_test_rig" } parking_lot = { workspace = true } rayon = { workspace = true } -sensitive_url = { path = "../../common/sensitive_url" } +sensitive_url = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } +typenum = { workspace = true } types = { workspace = true } diff --git a/testing/simulator/src/checks.rs b/testing/simulator/src/checks.rs index 1240785121a..35200692c32 100644 --- a/testing/simulator/src/checks.rs +++ b/testing/simulator/src/checks.rs @@ -1,7 +1,8 @@ use crate::local_network::LocalNetwork; use node_test_rig::eth2::types::{BlockId, FinalityCheckpointsData, StateId}; use std::time::Duration; -use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Slot, Unsigned}; +use typenum::Unsigned; +use types::{Epoch, EthSpec, ExecPayload, ExecutionBlockHash, Slot}; /// Checks that all of the validators have on-boarded by the start of the second eth1 voting /// period. 
diff --git a/testing/state_transition_vectors/Cargo.toml b/testing/state_transition_vectors/Cargo.toml index 66376f0a51d..437aa539f41 100644 --- a/testing/state_transition_vectors/Cargo.toml +++ b/testing/state_transition_vectors/Cargo.toml @@ -10,7 +10,9 @@ portable = ["beacon_chain/portable"] [dependencies] beacon_chain = { workspace = true } +bls = { workspace = true } ethereum_ssz = { workspace = true } +fixed_bytes = { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } types = { workspace = true } diff --git a/testing/state_transition_vectors/Makefile b/testing/state_transition_vectors/Makefile index 437aa50b00a..c90810ad398 100644 --- a/testing/state_transition_vectors/Makefile +++ b/testing/state_transition_vectors/Makefile @@ -5,4 +5,4 @@ test: cargo test --release --features "$(TEST_FEATURES)" clean: - rm -r vectors/ + rm -rf vectors/ diff --git a/testing/state_transition_vectors/src/main.rs b/testing/state_transition_vectors/src/main.rs index 4a829b68035..80c30489b7c 100644 --- a/testing/state_transition_vectors/src/main.rs +++ b/testing/state_transition_vectors/src/main.rs @@ -3,6 +3,8 @@ mod macros; mod exit; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; +use bls::Keypair; +use fixed_bytes::FixedBytesExtended; use ssz::Encode; use std::env; use std::fs::{self, File}; @@ -10,10 +12,8 @@ use std::io::Write; use std::path::{Path, PathBuf}; use std::process::exit; use std::sync::LazyLock; -use types::{ - BeaconState, EthSpec, Keypair, SignedBeaconBlock, test_utils::generate_deterministic_keypairs, -}; -use types::{FixedBytesExtended, Hash256, MainnetEthSpec, Slot}; +use types::{BeaconState, EthSpec, SignedBeaconBlock, test_utils::generate_deterministic_keypairs}; +use types::{Hash256, MainnetEthSpec, Slot}; type E = MainnetEthSpec; diff --git a/testing/web3signer_tests/Cargo.toml b/testing/web3signer_tests/Cargo.toml index b4637b4030f..3ef2e0f7f7a 100644 --- 
a/testing/web3signer_tests/Cargo.toml +++ b/testing/web3signer_tests/Cargo.toml @@ -9,10 +9,12 @@ edition = { workspace = true } [dev-dependencies] account_utils = { workspace = true } async-channel = { workspace = true } +bls = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } eth2_keystore = { workspace = true } eth2_network_config = { workspace = true } +fixed_bytes = { workspace = true } futures = { workspace = true } initialized_validators = { workspace = true } lighthouse_validator_store = { workspace = true } @@ -24,6 +26,7 @@ serde_json = { workspace = true } serde_yaml = { workspace = true } slashing_protection = { workspace = true } slot_clock = { workspace = true } +ssz_types = { workspace = true } task_executor = { workspace = true } tempfile = { workspace = true } tokio = { workspace = true } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index 15ec745e3f1..541f9b2b4a7 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -20,9 +20,11 @@ mod tests { use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, ValidatorDefinitions, Web3SignerDefinition, }; + use bls::{AggregateSignature, Keypair, PublicKeyBytes, SecretKey, Signature}; use eth2::types::FullBlockContents; use eth2_keystore::KeystoreBuilder; use eth2_network_config::Eth2NetworkConfig; + use fixed_bytes::FixedBytesExtended; use initialized_validators::{ InitializedValidators, load_pem_certificate, load_pkcs12_identity, }; @@ -32,6 +34,7 @@ mod tests { use serde::Serialize; use slashing_protection::{SLASHING_PROTECTION_FILENAME, SlashingDatabase}; use slot_clock::{SlotClock, TestingSlotClock}; + use ssz_types::BitList; use std::env; use std::fmt::Debug; use std::fs::{self, File}; diff --git a/validator_client/Cargo.toml b/validator_client/Cargo.toml index a8c8fd59f13..6990a2f61a7 100644 --- a/validator_client/Cargo.toml +++ b/validator_client/Cargo.toml @@ 
-1,6 +1,6 @@ [package] name = "validator_client" -version = "0.3.5" +version = { workspace = true } authors = ["Sigma Prime "] edition = { workspace = true } diff --git a/validator_client/beacon_node_fallback/Cargo.toml b/validator_client/beacon_node_fallback/Cargo.toml index 5fe2af4cb0b..481aece48b2 100644 --- a/validator_client/beacon_node_fallback/Cargo.toml +++ b/validator_client/beacon_node_fallback/Cargo.toml @@ -9,6 +9,7 @@ name = "beacon_node_fallback" path = "src/lib.rs" [dependencies] +bls = { workspace = true } clap = { workspace = true } eth2 = { workspace = true } futures = { workspace = true } diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index a3f60d2de04..2d75df2fa34 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -20,7 +20,7 @@ use std::future::Future; use std::sync::Arc; use std::time::{Duration, Instant}; use std::vec::Vec; -use strum::EnumVariantNames; +use strum::VariantNames; use task_executor::TaskExecutor; use tokio::{sync::RwLock, time::sleep}; use tracing::{debug, error, warn}; @@ -656,7 +656,7 @@ impl BeaconNodeFallback { R: Future>, Err: Debug, { - inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.as_ref()]); + inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.server().redacted()]); // There exists a race condition where `func` may be called when the candidate is // actually not ready. We deem this an acceptable inefficiency. @@ -668,7 +668,7 @@ impl BeaconNodeFallback { error = ?e, "Request to beacon node failed" ); - inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.as_ref()]); + inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.server().redacted()]); Err((candidate.to_string(), Error::RequestFailed(e))) } } @@ -752,7 +752,7 @@ async fn sort_nodes_by_health(nodes: &mut Vec) { } /// Serves as a cue for `BeaconNodeFallback` to tell which requests need to be broadcasted. 
-#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, EnumVariantNames, ValueEnum)] +#[derive(Clone, Copy, Debug, PartialEq, Deserialize, Serialize, VariantNames, ValueEnum)] #[strum(serialize_all = "kebab-case")] pub enum ApiTopic { None, @@ -773,12 +773,13 @@ impl ApiTopic { mod tests { use super::*; use crate::beacon_node_health::BeaconNodeHealthTier; + use bls::Signature; use eth2::SensitiveUrl; use eth2::Timeouts; use slot_clock::TestingSlotClock; use strum::VariantNames; use types::{BeaconBlockDeneb, MainnetEthSpec, Slot}; - use types::{EmptyBlock, Signature, SignedBeaconBlockDeneb, SignedBlindedBeaconBlock}; + use types::{EmptyBlock, SignedBeaconBlockDeneb, SignedBlindedBeaconBlock}; use validator_test_rig::mock_beacon_node::MockBeaconNode; type E = MainnetEthSpec; diff --git a/validator_client/doppelganger_service/Cargo.toml b/validator_client/doppelganger_service/Cargo.toml index e5b183570de..66b27eb39d5 100644 --- a/validator_client/doppelganger_service/Cargo.toml +++ b/validator_client/doppelganger_service/Cargo.toml @@ -6,6 +6,7 @@ authors = ["Sigma Prime "] [dependencies] beacon_node_fallback = { workspace = true } +bls = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } logging = { workspace = true } diff --git a/validator_client/doppelganger_service/src/lib.rs b/validator_client/doppelganger_service/src/lib.rs index b0ed78e9965..600ae82c546 100644 --- a/validator_client/doppelganger_service/src/lib.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -30,6 +30,7 @@ //! Doppelganger protection is a best-effort, last-line-of-defence mitigation. Do not rely upon it. 
use beacon_node_fallback::BeaconNodeFallback; +use bls::PublicKeyBytes; use environment::RuntimeContext; use eth2::types::LivenessResponseData; use logging::crit; @@ -41,7 +42,7 @@ use std::sync::Arc; use task_executor::ShutdownReason; use tokio::time::sleep; use tracing::{error, info}; -use types::{Epoch, EthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Slot}; use validator_store::{DoppelgangerStatus, ValidatorStore}; struct LivenessResponses { diff --git a/validator_client/graffiti_file/src/lib.rs b/validator_client/graffiti_file/src/lib.rs index 8b5637d09ed..8e40ef907dd 100644 --- a/validator_client/graffiti_file/src/lib.rs +++ b/validator_client/graffiti_file/src/lib.rs @@ -154,7 +154,7 @@ mod tests { let pk5 = PublicKeyBytes::deserialize(&hex::decode(&PK5[2..]).unwrap()).unwrap(); let pk6 = PublicKeyBytes::deserialize(&hex::decode(&PK6[2..]).unwrap()).unwrap(); - let file_name = temp.into_path().join("graffiti.txt"); + let file_name = temp.keep().join("graffiti.txt"); let file = File::create(&file_name).unwrap(); let mut graffiti_file = LineWriter::new(file); diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml index 588aa2ca931..2bd57867acf 100644 --- a/validator_client/http_api/Cargo.toml +++ b/validator_client/http_api/Cargo.toml @@ -16,10 +16,11 @@ deposit_contract = { workspace = true } directory = { workspace = true } dirs = { workspace = true } doppelganger_service = { workspace = true } -eth2 = { workspace = true } +eth2 = { workspace = true, features = ["lighthouse"] } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } graffiti_file = { workspace = true } health_metrics = { workspace = true } initialized_validators = { workspace = true } @@ -41,6 +42,7 @@ tempfile = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } tracing = { workspace = true } +typenum = { workspace = 
true } types = { workspace = true } url = { workspace = true } validator_dir = { workspace = true } @@ -54,3 +56,4 @@ zeroize = { workspace = true } futures = { workspace = true } itertools = { workspace = true } rand = { workspace = true, features = ["small_rng"] } +ssz_types = { workspace = true } diff --git a/validator_client/http_api/src/keystores.rs b/validator_client/http_api/src/keystores.rs index c0f918f9bb8..18accf0d5a0 100644 --- a/validator_client/http_api/src/keystores.rs +++ b/validator_client/http_api/src/keystores.rs @@ -1,5 +1,6 @@ //! Implementation of the standard keystore management API. use account_utils::validator_definitions::PasswordStorage; +use bls::PublicKeyBytes; use eth2::lighthouse_vc::{ std_types::{ DeleteKeystoreStatus, DeleteKeystoresRequest, DeleteKeystoresResponse, @@ -18,7 +19,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; use tracing::{info, warn}; -use types::{EthSpec, PublicKeyBytes}; +use types::EthSpec; use validator_dir::{Builder as ValidatorDirBuilder, keystore_password_path}; use warp::Rejection; use warp_utils::reject::{custom_bad_request, custom_server_error}; diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index 4494fca9574..a35b4ec6c6d 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -22,6 +22,7 @@ use account_utils::{ }; pub use api_secret::ApiSecret; use beacon_node_fallback::CandidateInfo; +use bls::{PublicKey, PublicKeyBytes}; use core::convert::Infallible; use create_validator::{ create_validators_mnemonic, create_validators_web3signer, get_voting_password_storage, @@ -30,8 +31,8 @@ use directory::{DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_VALIDATOR_D use eth2::lighthouse_vc::{ std_types::{AuthResponse, GetFeeRecipientResponse, GetGasLimitResponse}, types::{ - self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, PublicKey, - PublicKeyBytes, SetGraffitiRequest, 
UpdateCandidatesRequest, UpdateCandidatesResponse, + self as api_types, GenericResponse, GetGraffitiResponse, Graffiti, SetGraffitiRequest, + UpdateCandidatesRequest, UpdateCandidatesResponse, }, }; use health_metrics::observe::Observe; diff --git a/validator_client/http_api/src/remotekeys.rs b/validator_client/http_api/src/remotekeys.rs index 5aa63baac3b..987e1b8740d 100644 --- a/validator_client/http_api/src/remotekeys.rs +++ b/validator_client/http_api/src/remotekeys.rs @@ -2,6 +2,7 @@ use account_utils::validator_definitions::{ SigningDefinition, ValidatorDefinition, Web3SignerDefinition, }; +use bls::PublicKeyBytes; use eth2::lighthouse_vc::std_types::{ DeleteRemotekeyStatus, DeleteRemotekeysRequest, DeleteRemotekeysResponse, ImportRemotekeyStatus, ImportRemotekeysRequest, ImportRemotekeysResponse, @@ -14,7 +15,7 @@ use std::sync::Arc; use task_executor::TaskExecutor; use tokio::runtime::Handle; use tracing::{info, warn}; -use types::{EthSpec, PublicKeyBytes}; +use types::EthSpec; use url::Url; use warp::Rejection; use warp_utils::reject::custom_server_error; diff --git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 9a8784f2023..f83d9f4d526 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -4,6 +4,7 @@ use account_utils::validator_definitions::ValidatorDefinitions; use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, }; +use bls::Keypair; use deposit_contract::decode_eth1_tx_data; use doppelganger_service::DoppelgangerService; use eth2::{ diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index b0780e74278..5cb631983cc 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -11,6 +11,7 @@ use account_utils::{ eth2_wallet::WalletBuilder, mnemonic_from_phrase, random_mnemonic, random_password, random_password_string, 
validator_definitions::ValidatorDefinitions, }; +use bls::{Keypair, PublicKeyBytes}; use deposit_contract::decode_eth1_tx_data; use eth2::{ Error as ApiError, diff --git a/validator_client/http_api/src/tests/keystores.rs b/validator_client/http_api/src/tests/keystores.rs index dd2266e3f6e..eeb3cd94de0 100644 --- a/validator_client/http_api/src/tests/keystores.rs +++ b/validator_client/http_api/src/tests/keystores.rs @@ -1,19 +1,23 @@ use super::*; use account_utils::random_password_string; use bls::PublicKeyBytes; +use bls::{AggregateSignature, PublicKey}; use eth2::lighthouse_vc::types::UpdateFeeRecipientRequest; use eth2::lighthouse_vc::{ http_client::ValidatorClientHttpClient as HttpClient, std_types::{KeystoreJsonStr as Keystore, *}, types::Web3SignerValidatorRequest, }; +use fixed_bytes::FixedBytesExtended; use itertools::Itertools; use lighthouse_validator_store::DEFAULT_GAS_LIMIT; use rand::rngs::StdRng; use rand::{Rng, SeedableRng}; use slashing_protection::interchange::{Interchange, InterchangeMetadata}; +use ssz_types::BitList; use std::{collections::HashMap, path::Path}; use tokio::runtime::Handle; +use typenum::Unsigned; use types::{Address, attestation::AttestationBase}; use validator_store::ValidatorStore; use zeroize::Zeroizing; diff --git a/validator_client/initialized_validators/src/lib.rs b/validator_client/initialized_validators/src/lib.rs index 4d61bd4ed81..db6d03174dd 100644 --- a/validator_client/initialized_validators/src/lib.rs +++ b/validator_client/initialized_validators/src/lib.rs @@ -15,6 +15,7 @@ use account_utils::{ Web3SignerDefinition, }, }; +use bls::{Keypair, PublicKey, PublicKeyBytes}; use eth2_keystore::Keystore; use lockfile::{Lockfile, LockfileError}; use metrics::set_gauge; @@ -30,7 +31,7 @@ use std::sync::Arc; use std::time::Duration; use tracing::{debug, error, info, warn}; use types::graffiti::GraffitiString; -use types::{Address, Graffiti, Keypair, PublicKey, PublicKeyBytes}; +use types::{Address, Graffiti}; use 
url::{ParseError, Url}; use validator_dir::Builder as ValidatorDirBuilder; use zeroize::Zeroizing; diff --git a/validator_client/lighthouse_validator_store/Cargo.toml b/validator_client/lighthouse_validator_store/Cargo.toml index 0f8220bdc9f..01c7616be15 100644 --- a/validator_client/lighthouse_validator_store/Cargo.toml +++ b/validator_client/lighthouse_validator_store/Cargo.toml @@ -7,6 +7,7 @@ authors = ["Sigma Prime "] [dependencies] account_utils = { workspace = true } beacon_node_fallback = { workspace = true } +bls = { workspace = true } doppelganger_service = { workspace = true } either = { workspace = true } environment = { workspace = true } diff --git a/validator_client/lighthouse_validator_store/src/lib.rs b/validator_client/lighthouse_validator_store/src/lib.rs index d10fecb32e4..3bea21a05d8 100644 --- a/validator_client/lighthouse_validator_store/src/lib.rs +++ b/validator_client/lighthouse_validator_store/src/lib.rs @@ -1,4 +1,5 @@ use account_utils::validator_definitions::{PasswordStorage, ValidatorDefinition}; +use bls::{PublicKeyBytes, Signature}; use doppelganger_service::DoppelgangerService; use eth2::types::PublishBlockRequest; use initialized_validators::InitializedValidators; @@ -15,13 +16,13 @@ use std::marker::PhantomData; use std::path::Path; use std::sync::Arc; use task_executor::TaskExecutor; -use tracing::{error, info, warn}; +use tracing::{error, info, instrument, warn}; use types::{ AbstractExecPayload, Address, AggregateAndProof, Attestation, BeaconBlock, BlindedPayload, ChainSpec, ContributionAndProof, Domain, Epoch, EthSpec, Fork, Graffiti, Hash256, - PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBeaconBlock, - SignedContributionAndProof, SignedRoot, SignedValidatorRegistrationData, SignedVoluntaryExit, - Slot, SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, + SelectionProof, SignedAggregateAndProof, SignedBeaconBlock, SignedContributionAndProof, + SignedRoot, 
SignedValidatorRegistrationData, SignedVoluntaryExit, Slot, + SyncAggregatorSelectionData, SyncCommitteeContribution, SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, VoluntaryExit, graffiti::GraffitiString, }; @@ -242,6 +243,7 @@ impl LighthouseValidatorStore { /// Returns a `SigningMethod` for `validator_pubkey` *only if* that validator is considered safe /// by doppelganger protection. + #[instrument(skip_all, level = "debug")] fn doppelganger_checked_signing_method( &self, validator_pubkey: PublicKeyBytes, @@ -745,6 +747,7 @@ impl ValidatorStore for LighthouseValidatorS } } + #[instrument(skip_all)] async fn sign_attestation( &self, validator_pubkey: PublicKeyBytes, diff --git a/validator_client/signing_method/Cargo.toml b/validator_client/signing_method/Cargo.toml index 3e1a48142f9..cb321c2d498 100644 --- a/validator_client/signing_method/Cargo.toml +++ b/validator_client/signing_method/Cargo.toml @@ -5,6 +5,7 @@ edition = { workspace = true } authors = ["Sigma Prime "] [dependencies] +bls = { workspace = true } eth2_keystore = { workspace = true } ethereum_serde_utils = { workspace = true } lockfile = { workspace = true } @@ -12,6 +13,7 @@ parking_lot = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } task_executor = { workspace = true } +tracing = { workspace = true } types = { workspace = true } url = { workspace = true } validator_metrics = { workspace = true } diff --git a/validator_client/signing_method/src/lib.rs b/validator_client/signing_method/src/lib.rs index c535415b1e9..d0d98689526 100644 --- a/validator_client/signing_method/src/lib.rs +++ b/validator_client/signing_method/src/lib.rs @@ -3,6 +3,7 @@ //! - Via a local `Keypair`. //! 
- Via a remote signer (Web3Signer) +use bls::{Keypair, PublicKey, Signature}; use eth2_keystore::Keystore; use lockfile::Lockfile; use parking_lot::Mutex; @@ -10,6 +11,7 @@ use reqwest::{Client, header::ACCEPT}; use std::path::PathBuf; use std::sync::Arc; use task_executor::TaskExecutor; +use tracing::instrument; use types::*; use url::Url; use web3signer::{ForkInfo, MessageType, SigningRequest, SigningResponse}; @@ -131,6 +133,7 @@ impl SigningMethod { } /// Return the signature of `signable_message`, with respect to the `signing_context`. + #[instrument(skip_all, level = "debug")] pub async fn get_signature>( &self, signable_message: SignableMessage<'_, E, Payload>, diff --git a/validator_client/signing_method/src/web3signer.rs b/validator_client/signing_method/src/web3signer.rs index 99fad103035..246d9e9e091 100644 --- a/validator_client/signing_method/src/web3signer.rs +++ b/validator_client/signing_method/src/web3signer.rs @@ -1,6 +1,7 @@ //! Contains the types required to make JSON requests to Web3Signer servers. 
use super::Error; +use bls::{PublicKeyBytes, Signature}; use serde::{Deserialize, Serialize}; use types::*; diff --git a/validator_client/slashing_protection/Cargo.toml b/validator_client/slashing_protection/Cargo.toml index 6a778c5de31..b80da6c7867 100644 --- a/validator_client/slashing_protection/Cargo.toml +++ b/validator_client/slashing_protection/Cargo.toml @@ -11,9 +11,11 @@ portable = ["types/portable"] [dependencies] arbitrary = { workspace = true, features = ["derive"] } +bls = { workspace = true } eip_3076 = { workspace = true, features = ["json"] } ethereum_serde_utils = { workspace = true } filesystem = { workspace = true } +fixed_bytes = { workspace = true } r2d2 = { workspace = true } r2d2_sqlite = "0.21.0" rusqlite = { workspace = true } diff --git a/validator_client/slashing_protection/src/attestation_tests.rs b/validator_client/slashing_protection/src/attestation_tests.rs index 37766f271bb..d16c9613369 100644 --- a/validator_client/slashing_protection/src/attestation_tests.rs +++ b/validator_client/slashing_protection/src/attestation_tests.rs @@ -2,7 +2,8 @@ use crate::test_utils::*; use crate::*; -use types::{AttestationData, Checkpoint, Epoch, FixedBytesExtended, Slot}; +use fixed_bytes::FixedBytesExtended; +use types::{AttestationData, Checkpoint, Epoch, Slot}; pub fn build_checkpoint(epoch_num: u64) -> Checkpoint { Checkpoint { diff --git a/validator_client/slashing_protection/src/bin/test_generator.rs b/validator_client/slashing_protection/src/bin/test_generator.rs index dfda7983f73..df1c63f37d3 100644 --- a/validator_client/slashing_protection/src/bin/test_generator.rs +++ b/validator_client/slashing_protection/src/bin/test_generator.rs @@ -1,11 +1,12 @@ use eip_3076::{Interchange, InterchangeData, InterchangeMetadata, SignedAttestation, SignedBlock}; +use fixed_bytes::FixedBytesExtended; use slashing_protection::SUPPORTED_INTERCHANGE_FORMAT_VERSION; use slashing_protection::interchange_test::{MultiTestCase, TestCase}; use 
slashing_protection::test_utils::{DEFAULT_GENESIS_VALIDATORS_ROOT, pubkey}; use std::fs::{self, File}; use std::io::Write; use std::path::Path; -use types::{Epoch, FixedBytesExtended, Hash256, Slot}; +use types::{Epoch, Hash256, Slot}; fn metadata(genesis_validators_root: Hash256) -> InterchangeMetadata { InterchangeMetadata { diff --git a/validator_client/slashing_protection/src/block_tests.rs b/validator_client/slashing_protection/src/block_tests.rs index b3273015f42..2531f52d8ce 100644 --- a/validator_client/slashing_protection/src/block_tests.rs +++ b/validator_client/slashing_protection/src/block_tests.rs @@ -2,7 +2,8 @@ use super::*; use crate::test_utils::*; -use types::{BeaconBlockHeader, FixedBytesExtended, Slot}; +use fixed_bytes::FixedBytesExtended; +use types::{BeaconBlockHeader, Slot}; pub fn block(slot: u64) -> BeaconBlockHeader { BeaconBlockHeader { diff --git a/validator_client/slashing_protection/src/extra_interchange_tests.rs b/validator_client/slashing_protection/src/extra_interchange_tests.rs index 0f88ec8b1dc..18457720e4e 100644 --- a/validator_client/slashing_protection/src/extra_interchange_tests.rs +++ b/validator_client/slashing_protection/src/extra_interchange_tests.rs @@ -2,8 +2,8 @@ use crate::test_utils::pubkey; use crate::*; +use fixed_bytes::FixedBytesExtended; use tempfile::tempdir; -use types::FixedBytesExtended; #[test] fn export_non_existent_key() { diff --git a/validator_client/slashing_protection/src/interchange_test.rs b/validator_client/slashing_protection/src/interchange_test.rs index ebe0105f24d..0dfcda204d7 100644 --- a/validator_client/slashing_protection/src/interchange_test.rs +++ b/validator_client/slashing_protection/src/interchange_test.rs @@ -2,11 +2,13 @@ use crate::{ SigningRoot, SlashingDatabase, test_utils::{DEFAULT_GENESIS_VALIDATORS_ROOT, pubkey}, }; +use bls::PublicKeyBytes; use eip_3076::{Interchange, SignedAttestation, SignedBlock}; +use fixed_bytes::FixedBytesExtended; use serde::{Deserialize, Serialize}; 
use std::collections::HashSet; use tempfile::tempdir; -use types::{Epoch, FixedBytesExtended, Hash256, PublicKeyBytes, Slot}; +use types::{Epoch, Hash256, Slot}; #[derive(Debug, Clone, Deserialize, Serialize)] #[cfg_attr(feature = "arbitrary-fuzz", derive(arbitrary::Arbitrary))] diff --git a/validator_client/slashing_protection/src/lib.rs b/validator_client/slashing_protection/src/lib.rs index 917d51d38b7..f8580e73158 100644 --- a/validator_client/slashing_protection/src/lib.rs +++ b/validator_client/slashing_protection/src/lib.rs @@ -19,10 +19,11 @@ pub use crate::slashing_database::{ InterchangeError, InterchangeImportOutcome, SUPPORTED_INTERCHANGE_FORMAT_VERSION, SlashingDatabase, }; +use bls::PublicKeyBytes; use rusqlite::Error as SQLError; use std::fmt::Display; use std::io::{Error as IOError, ErrorKind}; -use types::{Hash256, PublicKeyBytes}; +use types::Hash256; /// The filename within the `validators` directory that contains the slashing protection DB. pub const SLASHING_PROTECTION_FILENAME: &str = "slashing_protection.sqlite"; @@ -133,7 +134,7 @@ impl Display for NotSafe { #[cfg(test)] mod test { - use types::FixedBytesExtended; + use fixed_bytes::FixedBytesExtended; use super::*; diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index ce32299a511..67e1234ac57 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -1,6 +1,7 @@ use crate::signed_attestation::InvalidAttestation; use crate::signed_block::InvalidBlock; use crate::{NotSafe, Safe, SignedAttestation, SignedBlock, SigningRoot, signing_root_from_row}; +use bls::PublicKeyBytes; use eip_3076::{ Interchange, InterchangeData, InterchangeMetadata, SignedAttestation as InterchangeAttestation, SignedBlock as InterchangeBlock, @@ -11,7 +12,8 @@ use rusqlite::{OptionalExtension, Transaction, TransactionBehavior, params}; use 
std::fs::File; use std::path::Path; use std::time::Duration; -use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, PublicKeyBytes, SignedRoot, Slot}; +use tracing::instrument; +use types::{AttestationData, BeaconBlockHeader, Epoch, Hash256, SignedRoot, Slot}; type Pool = r2d2::Pool; @@ -639,6 +641,7 @@ impl SlashingDatabase { /// to prevent concurrent checks and inserts from resulting in slashable data being inserted. /// /// This is the safe, externally-callable interface for checking attestations. + #[instrument(skip_all, level = "debug")] pub fn check_and_insert_attestation( &self, validator_pubkey: &PublicKeyBytes, diff --git a/validator_client/slashing_protection/tests/migration.rs b/validator_client/slashing_protection/tests/migration.rs index 3d4ec7ea9a8..14bf0d63f93 100644 --- a/validator_client/slashing_protection/tests/migration.rs +++ b/validator_client/slashing_protection/tests/migration.rs @@ -1,10 +1,11 @@ //! Tests for upgrading a previous version of the database to the latest schema. +use fixed_bytes::FixedBytesExtended; use slashing_protection::{NotSafe, SlashingDatabase}; use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; use tempfile::tempdir; -use types::{FixedBytesExtended, Hash256}; +use types::Hash256; fn test_data_dir() -> PathBuf { Path::new(&std::env::var("CARGO_MANIFEST_DIR").unwrap()).join("migration-tests") diff --git a/validator_client/src/cli.rs b/validator_client/src/cli.rs index 477781d3e88..3e1c46097f0 100644 --- a/validator_client/src/cli.rs +++ b/validator_client/src/cli.rs @@ -150,6 +150,16 @@ pub struct ValidatorClient { )] pub graffiti: Option, + #[clap( + long, + requires = "graffiti", + help = "When used, client version info will be prepended to user custom graffiti, with a space in between. 
\ + This should only be used with a Lighthouse beacon node.", + display_order = 0, + help_heading = FLAG_HEADER + )] + pub graffiti_append: bool, + #[clap( long, value_name = "GRAFFITI-FILE", diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 04d69dc9dc1..1a286a74dc1 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -7,7 +7,7 @@ use directory::{ DEFAULT_HARDCODED_NETWORK, DEFAULT_ROOT_DIR, DEFAULT_SECRET_DIR, DEFAULT_VALIDATOR_DIR, get_network_dir, }; -use eth2::types::Graffiti; +use eth2::types::{Graffiti, GraffitiPolicy}; use graffiti_file::GraffitiFile; use initialized_validators::Config as InitializedValidatorsConfig; use lighthouse_validator_store::Config as ValidatorStoreConfig; @@ -55,6 +55,8 @@ pub struct Config { pub graffiti: Option, /// Graffiti file to load per validator graffitis. pub graffiti_file: Option, + /// GraffitiPolicy to append client version info + pub graffiti_policy: Option, /// Configuration for the HTTP REST API. pub http_api: validator_http_api::Config, /// Configuration for the HTTP REST API. 
@@ -119,6 +121,7 @@ impl Default for Config { long_timeouts_multiplier: 1, graffiti: None, graffiti_file: None, + graffiti_policy: None, http_api: <_>::default(), http_metrics: <_>::default(), beacon_node_fallback: <_>::default(), @@ -233,6 +236,12 @@ impl Config { } } + config.graffiti_policy = if validator_client_config.graffiti_append { + Some(GraffitiPolicy::AppendClientVersions) + } else { + Some(GraffitiPolicy::PreserveUserGraffiti) + }; + if let Some(input_fee_recipient) = validator_client_config.suggested_fee_recipient { config.validator_store.fee_recipient = Some(input_fee_recipient); } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 71bdde10b02..23541cf6e28 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -486,7 +486,8 @@ impl ProductionValidatorClient { .executor(context.executor.clone()) .chain_spec(context.eth2_config.spec.clone()) .graffiti(config.graffiti) - .graffiti_file(config.graffiti_file.clone()); + .graffiti_file(config.graffiti_file.clone()) + .graffiti_policy(config.graffiti_policy); // If we have proposer nodes, add them to the block service builder. 
if proposer_nodes_num > 0 { diff --git a/validator_client/validator_services/src/attestation_service.rs b/validator_client/validator_services/src/attestation_service.rs index da6e8f35886..587d4668b8a 100644 --- a/validator_client/validator_services/src/attestation_service.rs +++ b/validator_client/validator_services/src/attestation_service.rs @@ -8,7 +8,7 @@ use std::ops::Deref; use std::sync::Arc; use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, sleep_until}; -use tracing::{debug, error, info, trace, warn}; +use tracing::{Instrument, Span, debug, error, info, info_span, instrument, trace, warn}; use tree_hash::TreeHash; use types::{Attestation, AttestationData, ChainSpec, CommitteeIndex, EthSpec, Slot}; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; @@ -180,8 +180,9 @@ impl AttestationService Result<(), String> { let slot = self.slot_clock.now().ok_or("Failed to read slot clock")?; let duration_to_next_slot = self @@ -189,6 +190,59 @@ impl AttestationService = self.duties_service.attesters(slot).into_iter().collect(); + + // Return early if there is no attestation duties + if attestation_duties.is_empty() { + return Ok(()); + } + + let attestation_service = self.clone(); + + let attestation_data_handle = self + .inner + .executor + .spawn_handle( + async move { + let attestation_data = attestation_service + .beacon_nodes + .first_success(|beacon_node| async move { + let _timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS_HTTP_GET], + ); + beacon_node + .get_validator_attestation_data(slot, 0) + .await + .map_err(|e| format!("Failed to produce attestation data: {:?}", e)) + .map(|result| result.data) + }) + .await + .map_err(|e| e.to_string())?; + + attestation_service + .sign_and_publish_attestations( + slot, + &attestation_duties, + attestation_data.clone(), + ) + .await + .map_err(|e| { + crit!( + error = format!("{:?}", e), + slot = 
slot.as_u64(), + "Error during attestation routine" + ); + e + })?; + Ok::(attestation_data) + }, + "unaggregated attestation production", + ) + .ok_or("Failed to spawn attestation data task")?; + // If a validator needs to publish an aggregate attestation, they must do so at 2/3 // through the slot. This delay triggers at this time let aggregate_production_instant = Instant::now() @@ -196,7 +250,7 @@ impl AttestationService> = self + let aggregate_duties_by_committee_index: HashMap> = self .duties_service .attesters(slot) .into_iter() @@ -207,24 +261,45 @@ impl AttestationService data, + Ok(Some(Err(err))) => { + error!(?err, "Attestation production failed"); + return; + } + Ok(None) | Err(_) => { + info!("Aborting attestation production due to shutdown"); + return; + } + }; + + // For each committee index for this slot: + // Create and publish `SignedAggregateAndProof` for all aggregating validators. + aggregate_duties_by_committee_index.into_iter().for_each( + |(committee_index, validator_duties)| { + let attestation_service = attestation_service_clone.clone(); + let attestation_data = attestation_data.clone(); + executor.spawn_ignoring_error( + attestation_service.handle_aggregates( + slot, + committee_index, + validator_duties, + aggregate_production_instant, + attestation_data, + ), + "aggregate publish", + ); + }, + ) + }, + "attestation and aggregate publish", + ); // Schedule pruning of the slashing protection database once all unaggregated // attestations have (hopefully) been signed, i.e. 
at the same time as aggregate @@ -234,109 +309,73 @@ impl AttestationService, aggregate_production_instant: Instant, + attestation_data: AttestationData, ) -> Result<(), ()> { - let attestations_timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::ATTESTATIONS], - ); - - // There's not need to produce `Attestation` or `SignedAggregateAndProof` if we do not have + // There's not need to produce `SignedAggregateAndProof` if we do not have // any validators for the given `slot` and `committee_index`. if validator_duties.is_empty() { return Ok(()); } - // Step 1. - // - // Download, sign and publish an `Attestation` for each validator. - let attestation_opt = self - .produce_and_publish_attestations(slot, committee_index, &validator_duties) - .await - .map_err(move |e| { - crit!( - error = format!("{:?}", e), - committee_index, - slot = slot.as_u64(), - "Error during attestation routine" - ) - })?; + // Wait until the `aggregation_production_instant` (2/3rds + // of the way though the slot). As verified in the + // `delay_triggers_when_in_the_past` test, this code will still run + // even if the instant has already elapsed. + sleep_until(aggregate_production_instant).await; - drop(attestations_timer); - - // Step 2. - // - // If an attestation was produced, make an aggregate. - if let Some(attestation_data) = attestation_opt { - // First, wait until the `aggregation_production_instant` (2/3rds - // of the way though the slot). As verified in the - // `delay_triggers_when_in_the_past` test, this code will still run - // even if the instant has already elapsed. - sleep_until(aggregate_production_instant).await; - - // Start the metrics timer *after* we've done the delay. 
- let _aggregates_timer = validator_metrics::start_timer_vec( - &validator_metrics::ATTESTATION_SERVICE_TIMES, - &[validator_metrics::AGGREGATES], - ); - - // Then download, sign and publish a `SignedAggregateAndProof` for each - // validator that is elected to aggregate for this `slot` and - // `committee_index`. - self.produce_and_publish_aggregates( - &attestation_data, - committee_index, - &validator_duties, - ) + // Start the metrics timer *after* we've done the delay. + let _aggregates_timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::AGGREGATES], + ); + + // Download, sign and publish a `SignedAggregateAndProof` for each + // validator that is elected to aggregate for this `slot` and + // `committee_index`. + self.produce_and_publish_aggregates(&attestation_data, committee_index, &validator_duties) .await .map_err(move |e| { crit!( error = format!("{:?}", e), committee_index, slot = slot.as_u64(), - "Error during attestation routine" + "Error during aggregate attestation routine" ) })?; - } Ok(()) } - /// Performs the first step of the attesting process: downloading `Attestation` objects, - /// signing them and returning them to the validator. + /// Performs the main steps of the attesting process: signing and publishing to the BN. /// - /// https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#attesting + /// https://github.com/ethereum/consensus-specs/blob/master/specs/phase0/validator.md#attesting /// /// ## Detail /// /// The given `validator_duties` should already be filtered to only contain those that match - /// `slot` and `committee_index`. Critical errors will be logged if this is not the case. - /// - /// Only one `Attestation` is downloaded from the BN. It is then cloned and signed by each - /// validator and the list of individually-signed `Attestation` objects is returned to the BN. - async fn produce_and_publish_attestations( + /// `slot`. 
Critical errors will be logged if this is not the case. + #[instrument(skip_all, fields(%slot, %attestation_data.beacon_block_root))] + async fn sign_and_publish_attestations( &self, slot: Slot, - committee_index: CommitteeIndex, validator_duties: &[DutyAndProof], - ) -> Result, String> { - if validator_duties.is_empty() { - return Ok(None); - } + attestation_data: AttestationData, + ) -> Result<(), String> { + let _attestations_timer = validator_metrics::start_timer_vec( + &validator_metrics::ATTESTATION_SERVICE_TIMES, + &[validator_metrics::ATTESTATIONS], + ); let current_epoch = self .slot_clock @@ -344,101 +383,90 @@ impl AttestationService(attestation_data, &self.chain_spec) { - crit!( - validator = ?duty.pubkey, - duty_slot = %duty.slot, - attestation_slot = %attestation_data.slot, - duty_index = duty.committee_index, - attestation_index = attestation_data.index, - "Inconsistent validator duties during signing" - ); - return None; - } + let signing_futures = validator_duties.iter().map(|duty_and_proof| { + async move { + let duty = &duty_and_proof.duty; + let attestation_data = attestation_data_ref; - let mut attestation = match Attestation::empty_for_signing( - duty.committee_index, - duty.committee_length as usize, - attestation_data.slot, - attestation_data.beacon_block_root, - attestation_data.source, - attestation_data.target, - &self.chain_spec, - ) { - Ok(attestation) => attestation, - Err(err) => { + // Ensure that the attestation matches the duties. 
+ if !duty.match_attestation_data::(attestation_data, &self.chain_spec) { crit!( validator = ?duty.pubkey, - ?duty, - ?err, - "Invalid validator duties during signing" + duty_slot = %duty.slot, + attestation_slot = %attestation_data.slot, + duty_index = duty.committee_index, + attestation_index = attestation_data.index, + "Inconsistent validator duties during signing" ); return None; } - }; - match self - .validator_store - .sign_attestation( - duty.pubkey, - duty.validator_committee_index as usize, - &mut attestation, - current_epoch, - ) - .await - { - Ok(()) => Some((attestation, duty.validator_index)), - Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { - // A pubkey can be missing when a validator was recently - // removed via the API. - warn!( - info = "a validator may have recently been removed from this VC", - pubkey = ?pubkey, - validator = ?duty.pubkey, - committee_index = committee_index, - slot = slot.as_u64(), - "Missing pubkey for attestation" - ); - None - } - Err(e) => { - crit!( - error = ?e, - validator = ?duty.pubkey, - committee_index, - slot = slot.as_u64(), - "Failed to sign attestation" - ); - None + let mut attestation = match Attestation::empty_for_signing( + duty.committee_index, + duty.committee_length as usize, + attestation_data.slot, + attestation_data.beacon_block_root, + attestation_data.source, + attestation_data.target, + &self.chain_spec, + ) { + Ok(attestation) => attestation, + Err(err) => { + crit!( + validator = ?duty.pubkey, + ?duty, + ?err, + "Invalid validator duties during signing" + ); + return None; + } + }; + + match self + .validator_store + .sign_attestation( + duty.pubkey, + duty.validator_committee_index as usize, + &mut attestation, + current_epoch, + ) + .await + { + Ok(()) => Some((attestation, duty.validator_index)), + Err(ValidatorStoreError::UnknownPubkey(pubkey)) => { + // A pubkey can be missing when a validator was recently + // removed via the API. 
+ warn!( + info = "a validator may have recently been removed from this VC", + pubkey = ?pubkey, + validator = ?duty.pubkey, + slot = slot.as_u64(), + "Missing pubkey for attestation" + ); + None + } + Err(e) => { + crit!( + error = ?e, + validator = ?duty.pubkey, + slot = slot.as_u64(), + "Failed to sign attestation" + ); + None + } } } + .instrument(Span::current()) }); // Execute all the futures in parallel, collecting any successful results. let (ref attestations, ref validator_indices): (Vec<_>, Vec<_>) = join_all(signing_futures) + .instrument(info_span!( + "sign_attestations", + count = validator_duties.len() + )) .await .into_iter() .flatten() @@ -446,7 +474,7 @@ impl AttestationService AttestationService(single_attestations, fork_name) .await }) + .instrument(info_span!( + "publish_attestations", + count = attestations.len() + )) .await { Ok(()) => info!( @@ -507,7 +539,7 @@ impl AttestationService AttestationService AttestationService AttestationService AttestationService { diff --git a/validator_client/validator_services/src/block_service.rs b/validator_client/validator_services/src/block_service.rs index c111b1f22eb..625f8db7cb9 100644 --- a/validator_client/validator_services/src/block_service.rs +++ b/validator_client/validator_services/src/block_service.rs @@ -1,5 +1,6 @@ use beacon_node_fallback::{ApiTopic, BeaconNodeFallback, Error as FallbackError, Errors}; -use bls::SignatureBytes; +use bls::PublicKeyBytes; +use eth2::types::GraffitiPolicy; use eth2::{BeaconNodeHttpClient, StatusCode}; use graffiti_file::{GraffitiFile, determine_graffiti}; use logging::crit; @@ -11,8 +12,8 @@ use std::sync::Arc; use std::time::Duration; use task_executor::TaskExecutor; use tokio::sync::mpsc; -use tracing::{debug, error, info, trace, warn}; -use types::{BlockType, ChainSpec, EthSpec, Graffiti, PublicKeyBytes, Slot}; +use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; +use types::{BlockType, ChainSpec, EthSpec, Graffiti, Slot}; use 
validator_store::{Error as ValidatorStoreError, SignedBlock, UnsignedBlock, ValidatorStore}; #[derive(Debug)] @@ -50,6 +51,7 @@ pub struct BlockServiceBuilder { chain_spec: Option>, graffiti: Option, graffiti_file: Option, + graffiti_policy: Option, } impl BlockServiceBuilder { @@ -63,6 +65,7 @@ impl BlockServiceBuilder { chain_spec: None, graffiti: None, graffiti_file: None, + graffiti_policy: None, } } @@ -106,6 +109,11 @@ impl BlockServiceBuilder { self } + pub fn graffiti_policy(mut self, graffiti_policy: Option) -> Self { + self.graffiti_policy = graffiti_policy; + self + } + pub fn build(self) -> Result, String> { Ok(BlockService { inner: Arc::new(Inner { @@ -127,6 +135,7 @@ impl BlockServiceBuilder { proposer_nodes: self.proposer_nodes, graffiti: self.graffiti, graffiti_file: self.graffiti_file, + graffiti_policy: self.graffiti_policy, }), }) } @@ -192,6 +201,7 @@ pub struct Inner { chain_spec: Arc, graffiti: Option, graffiti_file: Option, + graffiti_policy: Option, } /// Attempts to produce attestations for any block producer(s) at the start of the epoch. 
@@ -298,7 +308,7 @@ impl BlockService { self.inner.executor.spawn( async move { let result = service - .publish_block(slot, validator_pubkey, builder_boost_factor) + .get_validator_block_and_publish_block(slot, validator_pubkey, builder_boost_factor) .await; match result { @@ -320,6 +330,7 @@ impl BlockService { } #[allow(clippy::too_many_arguments)] + #[instrument(skip_all, fields(%slot, ?validator_pubkey))] async fn sign_and_publish_block( &self, proposer_fallback: ProposerFallback, @@ -333,6 +344,7 @@ impl BlockService { let res = self .validator_store .sign_block(*validator_pubkey, unsigned_block, slot) + .instrument(info_span!("sign_block")) .await; let signed_block = match res { @@ -389,7 +401,12 @@ impl BlockService { Ok(()) } - async fn publish_block( + #[instrument( + name = "block_proposal_duty_cycle", + skip_all, + fields(%slot, ?validator_pubkey) + )] + async fn get_validator_block_and_publish_block( self, slot: Slot, validator_pubkey: PublicKeyBytes, @@ -442,33 +459,82 @@ impl BlockService { info!(slot = slot.as_u64(), "Requesting unsigned block"); - // Request block from first responsive beacon node. + // Request an SSZ block from all beacon nodes in order, returning on the first successful response. + // If all nodes fail, run a second pass falling back to JSON. // - // Try the proposer nodes last, since it's likely that they don't have a + // Proposer nodes will always be tried last during each pass since it's likely that they don't have a // great view of attestations on the network. 
- let unsigned_block = proposer_fallback + let ssz_block_response = proposer_fallback .request_proposers_last(|beacon_node| async move { let _get_timer = validator_metrics::start_timer_vec( &validator_metrics::BLOCK_SERVICE_TIMES, &[validator_metrics::BEACON_BLOCK_HTTP_GET], ); - Self::get_validator_block( - &beacon_node, - slot, - randao_reveal_ref, - graffiti, - proposer_index, - builder_boost_factor, - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - }) + beacon_node + .get_validator_blocks_v3_ssz::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + self_ref.graffiti_policy, + ) + .await }) - .await?; + .await; + + let block_response = match ssz_block_response { + Ok((ssz_block_response, _metadata)) => ssz_block_response, + Err(e) => { + warn!( + slot = slot.as_u64(), + error = %e, + "SSZ block production failed, falling back to JSON" + ); + + proposer_fallback + .request_proposers_last(|beacon_node| async move { + let _get_timer = validator_metrics::start_timer_vec( + &validator_metrics::BLOCK_SERVICE_TIMES, + &[validator_metrics::BEACON_BLOCK_HTTP_GET], + ); + let (json_block_response, _metadata) = beacon_node + .get_validator_blocks_v3::( + slot, + randao_reveal_ref, + graffiti.as_ref(), + builder_boost_factor, + self_ref.graffiti_policy, + ) + .await + .map_err(|e| { + BlockError::Recoverable(format!( + "Error from beacon node when producing block: {:?}", + e + )) + })?; + + Ok(json_block_response.data) + }) + .await + .map_err(BlockError::from)? 
+ } + }; + + let (block_proposer, unsigned_block) = match block_response { + eth2::types::ProduceBlockV3Response::Full(block) => { + (block.block().proposer_index(), UnsignedBlock::Full(block)) + } + eth2::types::ProduceBlockV3Response::Blinded(block) => { + (block.proposer_index(), UnsignedBlock::Blinded(block)) + } + }; + + info!(slot = slot.as_u64(), "Received unsigned block"); + if proposer_index != Some(block_proposer) { + return Err(BlockError::Recoverable( + "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), + )); + } self_ref .sign_and_publish_block( @@ -483,6 +549,7 @@ impl BlockService { Ok(()) } + #[instrument(skip_all)] async fn publish_signed_block_contents( &self, signed_block: &SignedBlock, @@ -517,70 +584,6 @@ impl BlockService { } Ok::<_, BlockError>(()) } - - async fn get_validator_block( - beacon_node: &BeaconNodeHttpClient, - slot: Slot, - randao_reveal_ref: &SignatureBytes, - graffiti: Option, - proposer_index: Option, - builder_boost_factor: Option, - ) -> Result, BlockError> { - let block_response = match beacon_node - .get_validator_blocks_v3_ssz::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - builder_boost_factor, - ) - .await - { - Ok((ssz_block_response, _)) => ssz_block_response, - Err(e) => { - warn!( - slot = slot.as_u64(), - error = %e, - "Beacon node does not support SSZ in block production, falling back to JSON" - ); - - let (json_block_response, _) = beacon_node - .get_validator_blocks_v3::( - slot, - randao_reveal_ref, - graffiti.as_ref(), - builder_boost_factor, - ) - .await - .map_err(|e| { - BlockError::Recoverable(format!( - "Error from beacon node when producing block: {:?}", - e - )) - })?; - - // Extract ProduceBlockV3Response (data field of the struct ForkVersionedResponse) - json_block_response.data - } - }; - - let (block_proposer, unsigned_block) = match block_response { - eth2::types::ProduceBlockV3Response::Full(block) => { - (block.block().proposer_index(), 
UnsignedBlock::Full(block)) - } - eth2::types::ProduceBlockV3Response::Blinded(block) => { - (block.proposer_index(), UnsignedBlock::Blinded(block)) - } - }; - - info!(slot = slot.as_u64(), "Received unsigned block"); - if proposer_index != Some(block_proposer) { - return Err(BlockError::Recoverable( - "Proposer index does not match block proposer. Beacon chain re-orged".to_string(), - )); - } - - Ok::<_, BlockError>(unsigned_block) - } } /// Wrapper for values we want to log about a block we signed, for easy extraction from the possible diff --git a/validator_client/validator_services/src/duties_service.rs b/validator_client/validator_services/src/duties_service.rs index 7569d3946ab..c2378181ef0 100644 --- a/validator_client/validator_services/src/duties_service.rs +++ b/validator_client/validator_services/src/duties_service.rs @@ -10,6 +10,7 @@ use crate::block_service::BlockServiceNotification; use crate::sync::SyncDutiesMap; use crate::sync::poll_sync_committee_duties; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use bls::PublicKeyBytes; use eth2::types::{ AttesterData, BeaconCommitteeSelection, BeaconCommitteeSubscription, DutiesResponse, ProposerData, StateId, ValidatorId, @@ -29,7 +30,7 @@ use std::time::Duration; use task_executor::TaskExecutor; use tokio::{sync::mpsc::Sender, time::sleep}; use tracing::{debug, error, info, warn}; -use types::{ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, SelectionProof, Slot}; +use types::{ChainSpec, Epoch, EthSpec, Hash256, SelectionProof, Slot}; use validator_metrics::{ATTESTATION_DUTY, get_int_gauge, set_int_gauge}; use validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; diff --git a/validator_client/validator_services/src/sync.rs b/validator_client/validator_services/src/sync.rs index 77032ed15b4..0f456a70507 100644 --- a/validator_client/validator_services/src/sync.rs +++ b/validator_client/validator_services/src/sync.rs @@ -1,4 +1,5 @@ use 
crate::duties_service::{DutiesService, Error, SelectionProofConfig}; +use bls::PublicKeyBytes; use eth2::types::SyncCommitteeSelection; use futures::future::join_all; use futures::stream::{FuturesUnordered, StreamExt}; @@ -8,7 +9,7 @@ use slot_clock::SlotClock; use std::collections::{HashMap, HashSet}; use std::sync::Arc; use tracing::{debug, error, info, warn}; -use types::{ChainSpec, EthSpec, PublicKeyBytes, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId}; +use types::{ChainSpec, EthSpec, Slot, SyncDuty, SyncSelectionProof, SyncSubnetId}; use validator_store::{DoppelgangerStatus, Error as ValidatorStoreError, ValidatorStore}; /// Top-level data-structure containing sync duty information. diff --git a/validator_client/validator_services/src/sync_committee_service.rs b/validator_client/validator_services/src/sync_committee_service.rs index 02f9f24c8a1..28c3d1caadb 100644 --- a/validator_client/validator_services/src/sync_committee_service.rs +++ b/validator_client/validator_services/src/sync_committee_service.rs @@ -1,5 +1,6 @@ use crate::duties_service::DutiesService; use beacon_node_fallback::{ApiTopic, BeaconNodeFallback}; +use bls::PublicKeyBytes; use eth2::types::BlockId; use futures::future::FutureExt; use futures::future::join_all; @@ -11,10 +12,10 @@ use std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; use task_executor::TaskExecutor; use tokio::time::{Duration, Instant, sleep, sleep_until}; -use tracing::{debug, error, info, trace, warn}; +use tracing::{Instrument, debug, error, info, info_span, instrument, trace, warn}; use types::{ - ChainSpec, EthSpec, Hash256, PublicKeyBytes, Slot, SyncCommitteeSubscription, - SyncContributionData, SyncDuty, SyncSelectionProof, SyncSubnetId, + ChainSpec, EthSpec, Hash256, Slot, SyncCommitteeSubscription, SyncContributionData, SyncDuty, + SyncSelectionProof, SyncSubnetId, }; use validator_store::{Error as ValidatorStoreError, ValidatorStore}; @@ -208,7 +209,8 @@ impl SyncCommitteeService 
SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService SyncCommitteeService"] [dependencies] +bls = { workspace = true } eth2 = { workspace = true } slashing_protection = { workspace = true } types = { workspace = true } diff --git a/validator_client/validator_store/src/lib.rs b/validator_client/validator_store/src/lib.rs index 6fd2e270649..2b472799d24 100644 --- a/validator_client/validator_store/src/lib.rs +++ b/validator_client/validator_store/src/lib.rs @@ -1,3 +1,4 @@ +use bls::{PublicKeyBytes, Signature}; use eth2::types::{FullBlockContents, PublishBlockRequest}; use slashing_protection::NotSafe; use std::fmt::Debug; @@ -5,9 +6,9 @@ use std::future::Future; use std::sync::Arc; use types::{ Address, Attestation, AttestationError, BlindedBeaconBlock, Epoch, EthSpec, Graffiti, Hash256, - PublicKeyBytes, SelectionProof, Signature, SignedAggregateAndProof, SignedBlindedBeaconBlock, - SignedContributionAndProof, SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, - SyncCommitteeMessage, SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, + SelectionProof, SignedAggregateAndProof, SignedBlindedBeaconBlock, SignedContributionAndProof, + SignedValidatorRegistrationData, Slot, SyncCommitteeContribution, SyncCommitteeMessage, + SyncSelectionProof, SyncSubnetId, ValidatorRegistrationData, }; #[derive(Debug, PartialEq, Clone)] diff --git a/validator_manager/Cargo.toml b/validator_manager/Cargo.toml index 9192f0e86b0..16ce1e023fa 100644 --- a/validator_manager/Cargo.toml +++ b/validator_manager/Cargo.toml @@ -6,9 +6,10 @@ edition = { workspace = true } [dependencies] account_utils = { workspace = true } +bls = { workspace = true } clap = { workspace = true } clap_utils = { workspace = true } -derivative = { workspace = true } +educe = { workspace = true } environment = { workspace = true } eth2 = { workspace = true } eth2_network_config = 
{ workspace = true } diff --git a/validator_manager/src/common.rs b/validator_manager/src/common.rs index 0e93b257734..a95d2a1fd61 100644 --- a/validator_manager/src/common.rs +++ b/validator_manager/src/common.rs @@ -1,5 +1,6 @@ pub use account_utils::STDIN_INPUTS_FLAG; use account_utils::strip_off_newlines; +use bls::{Keypair, PublicKeyBytes, SignatureBytes}; use eth2::lighthouse_vc::std_types::{InterchangeJsonStr, KeystoreJsonStr}; use eth2::{ SensitiveUrl, diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index 19f78be2ea7..8682705956c 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -1,6 +1,7 @@ use super::common::*; use crate::DumpConfig; use account_utils::{random_password_string, read_mnemonic_from_cli, read_password_from_user}; +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use eth2::{ @@ -586,6 +587,7 @@ async fn run(config: CreateConfig, spec: &ChainSpec) -> Result<(), S #[cfg(test)] pub mod tests { use super::*; + use bls::SignatureBytes; use eth2_network_config::Eth2NetworkConfig; use regex::Regex; use std::path::Path; diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs index 3ff0c9529d7..2421b002aab 100644 --- a/validator_manager/src/delete_validators.rs +++ b/validator_manager/src/delete_validators.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::{ SensitiveUrl, @@ -5,7 +6,6 @@ use eth2::{ }; use serde::{Deserialize, Serialize}; use std::path::PathBuf; -use types::PublicKeyBytes; use crate::{DumpConfig, common::vc_http_client}; diff --git a/validator_manager/src/exit_validators.rs b/validator_manager/src/exit_validators.rs index a6bbf05fb4a..b53d9c0a16d 100644 --- a/validator_manager/src/exit_validators.rs +++ b/validator_manager/src/exit_validators.rs @@ -1,5 +1,6 @@ use 
crate::{DumpConfig, common::vc_http_client}; +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; use eth2::types::{ConfigAndPreset, Epoch, StateId, ValidatorId, ValidatorStatus}; @@ -10,7 +11,7 @@ use slot_clock::{SlotClock, SystemTimeSlotClock}; use std::fs::write; use std::path::PathBuf; use std::time::Duration; -use types::{ChainSpec, EthSpec, PublicKeyBytes}; +use types::{ChainSpec, EthSpec}; pub const CMD: &str = "exit"; pub const BEACON_URL_FLAG: &str = "beacon-node"; @@ -191,8 +192,7 @@ async fn run(config: ExitConfig) -> Result<(), String> { // Only publish the voluntary exit if the --beacon-node flag is present if let Some(ref beacon_url) = beacon_url { let beacon_node = BeaconNodeHttpClient::new( - SensitiveUrl::parse(beacon_url.as_ref()) - .map_err(|e| format!("Failed to parse beacon http server: {:?}", e))?, + beacon_url.clone(), Timeouts::set_all(Duration::from_secs(12)), ); @@ -399,7 +399,7 @@ mod test { }) .collect(); - let beacon_url = SensitiveUrl::parse(self.beacon_node.client.as_ref()).unwrap(); + let beacon_url = self.beacon_node.client.server().clone(); let validators_to_exit = index_of_validators_to_exit .iter() diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 5f5f049ed97..24917f7d1b4 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -3,7 +3,7 @@ use crate::DumpConfig; use account_utils::eth2_keystore::Keystore; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use derivative::Derivative; +use educe::Educe; use eth2::lighthouse_vc::types::KeystoreJsonStr; use eth2::{SensitiveUrl, lighthouse_vc::std_types::ImportKeystoreStatus}; use serde::{Deserialize, Serialize}; @@ -159,15 +159,15 @@ pub fn cli_app() -> Command { ) } -#[derive(Clone, PartialEq, Serialize, Deserialize, Derivative)] -#[derivative(Debug)] +#[derive(Clone, PartialEq, Serialize, 
Deserialize, Educe)] +#[educe(Debug)] pub struct ImportConfig { pub validators_file_path: Option, pub keystore_file_path: Option, pub vc_url: SensitiveUrl, pub vc_token_path: PathBuf, pub ignore_duplicates: bool, - #[derivative(Debug = "ignore")] + #[educe(Debug(ignore))] pub password: Option>, pub fee_recipient: Option
, pub gas_limit: Option, diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs index b064982adf4..f7a09f8d8e7 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -1,3 +1,4 @@ +use bls::PublicKeyBytes; use clap::{Arg, ArgAction, ArgMatches, Command}; use eth2::lighthouse_vc::types::SingleKeystoreResponse; use eth2::types::{ConfigAndPreset, StateId, ValidatorId, ValidatorStatus}; @@ -5,7 +6,7 @@ use eth2::{BeaconNodeHttpClient, SensitiveUrl, Timeouts}; use serde::{Deserialize, Serialize}; use std::path::PathBuf; use std::time::Duration; -use types::{ChainSpec, EthSpec, PublicKeyBytes}; +use types::{ChainSpec, EthSpec}; use crate::exit_validators::get_current_epoch; use crate::{DumpConfig, common::vc_http_client}; @@ -134,8 +135,7 @@ async fn run(config: ListConfig) -> Result